Dataset schema (field, type, observed range):

| Field | Type | Range / values |
|---|---|---|
| repo_name | string | length 7–71 |
| file_path | string | length 5–118 |
| context | list | |
| import_statement | string | length 45–12.5k |
| token_num | int64 | 641–99.4k |
| cropped_code | string | length 44–17k |
| all_code | string | length 43–754k |
| next_line | string | length 2–330 |
| gold_snippet_index | int64 | 0–68 |
| created_at | string | length 25 (fixed) |
| level | string | 9 classes |
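For orientation, here is a minimal sketch of loading one row with the Hugging Face `datasets` library. The Hub repo id below is a hypothetical placeholder; substitute this dataset's actual id.

```python
from datasets import load_dataset

# "ORG/DATASET" is a placeholder, not the real id of this dataset.
ds = load_dataset("ORG/DATASET", split="train")
row = ds[0]

print(row["repo_name"])     # e.g. "AMAAI-Lab/mustango"
print(row["file_path"])     # file being completed within that repo
print(row["token_num"])     # prompt size in tokens
print(len(row["context"]))  # number of retrieved cross-file snippets
```

The example row below shows what these fields look like in practice.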
Example row:

repo_name: AMAAI-Lab/mustango
file_path: diffusers/src/diffusers/models/transformer_2d.py

context:
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes inheriting from [`ConfigMixin`] with\n - [`~ConfigMixin.from_config`]\n - [`~ConfigMixin.save_config`]\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the init function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class will be instantiated. 
Make sure to only load\n configuration files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the Python class.\n `**kwargs` will be directly passed to the underlying scheduler/model's `__init__` method and eventually\n overwrite same named arguments of `config`.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~ConfigMixin.save_config`], e.g.,\n `./my_model_directory/`.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config shall be returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the commit_hash of the loaded configuration shall be returned.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. 
Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "ImagePositionalEmbeddings", "path": "diffusers/src/diffusers/models/embeddings.py", "snippet": "class ImagePositionalEmbeddings(nn.Module):\n \"\"\"\n Converts latent image classes into vector embeddings. 
Sums the vector embeddings with positional embeddings for the\n height and width of the latent space.\n\n For more details, see figure 10 of the dall-e paper: https://arxiv.org/abs/2102.12092\n\n For VQ-diffusion:\n\n Output vector embeddings are used as input for the transformer.\n\n Note that the vector embeddings for the transformer are different than the vector embeddings from the VQVAE.\n\n Args:\n num_embed (`int`):\n Number of embeddings for the latent pixels embeddings.\n height (`int`):\n Height of the latent image i.e. the number of height embeddings.\n width (`int`):\n Width of the latent image i.e. the number of width embeddings.\n embed_dim (`int`):\n Dimension of the produced vector embeddings. Used for the latent pixel, height, and width embeddings.\n \"\"\"\n\n def __init__(\n self,\n num_embed: int,\n height: int,\n width: int,\n embed_dim: int,\n ):\n super().__init__()\n\n self.height = height\n self.width = width\n self.num_embed = num_embed\n self.embed_dim = embed_dim\n\n self.emb = nn.Embedding(self.num_embed, embed_dim)\n self.height_emb = nn.Embedding(self.height, embed_dim)\n self.width_emb = nn.Embedding(self.width, embed_dim)\n\n def forward(self, index):\n emb = self.emb(index)\n\n height_emb = self.height_emb(torch.arange(self.height, device=index.device).view(1, self.height))\n\n # 1 x H x D -> 1 x H x 1 x D\n height_emb = height_emb.unsqueeze(2)\n\n width_emb = self.width_emb(torch.arange(self.width, device=index.device).view(1, self.width))\n\n # 1 x W x D -> 1 x 1 x W x D\n width_emb = width_emb.unsqueeze(1)\n\n pos_emb = height_emb + width_emb\n\n # 1 x H x W x D -> 1 x L xD\n pos_emb = pos_emb.view(1, self.height * self.width, -1)\n\n emb = emb + pos_emb[:, : emb.shape[1], :]\n\n return emb" }, { "identifier": "deprecate", "path": "diffusers/src/diffusers/utils/deprecation_utils.py", "snippet": "def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True):\n from .. 
import __version__\n\n deprecated_kwargs = take_from\n values = ()\n if not isinstance(args[0], tuple):\n args = (args,)\n\n for attribute, version_name, message in args:\n if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):\n raise ValueError(\n f\"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'\"\n f\" version {__version__} is >= {version_name}\"\n )\n\n warning = None\n if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:\n values += (deprecated_kwargs.pop(attribute),)\n warning = f\"The `{attribute}` argument is deprecated and will be removed in version {version_name}.\"\n elif hasattr(deprecated_kwargs, attribute):\n values += (getattr(deprecated_kwargs, attribute),)\n warning = f\"The `{attribute}` attribute is deprecated and will be removed in version {version_name}.\"\n elif deprecated_kwargs is None:\n warning = f\"`{attribute}` is deprecated and will be removed in version {version_name}.\"\n\n if warning is not None:\n warning = warning + \" \" if standard_warn else \"\"\n warnings.warn(warning + message, FutureWarning, stacklevel=2)\n\n if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:\n call_frame = inspect.getouterframes(inspect.currentframe())[1]\n filename = call_frame.filename\n line_number = call_frame.lineno\n function = call_frame.function\n key, value = next(iter(deprecated_kwargs.items()))\n raise TypeError(f\"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`\")\n\n if len(values) == 0:\n return\n elif len(values) == 1:\n return values[0]\n return values" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `BaseOutput` directly. 
Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "BasicTransformerBlock", "path": "diffusers/src/diffusers/models/attention.py", "snippet": "class BasicTransformerBlock(nn.Module):\n r\"\"\"\n A basic Transformer block.\n\n Parameters:\n dim (`int`): The number of channels in the input and output.\n num_attention_heads (`int`): The number of heads to use for multi-head attention.\n attention_head_dim (`int`): The number of channels in each head.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.\n only_cross_attention (`bool`, *optional*):\n Whether to use only cross-attention layers. In this case two cross attention layers are used.\n double_self_attention (`bool`, *optional*):\n Whether to use two self-attention layers. In this case no cross attention layers are used.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to be used in feed-forward.\n num_embeds_ada_norm (:\n obj: `int`, *optional*): The number of diffusion steps used during training. 
See `Transformer2DModel`.\n attention_bias (:\n obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n num_attention_heads: int,\n attention_head_dim: int,\n dropout=0.0,\n cross_attention_dim: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n attention_bias: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_elementwise_affine: bool = True,\n norm_type: str = \"layer_norm\",\n final_dropout: bool = False,\n ):\n super().__init__()\n self.only_cross_attention = only_cross_attention\n\n self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == \"ada_norm_zero\"\n self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == \"ada_norm\"\n\n if norm_type in (\"ada_norm\", \"ada_norm_zero\") and num_embeds_ada_norm is None:\n raise ValueError(\n f\"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to\"\n f\" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.\"\n )\n\n # 1. Self-Attn\n self.attn1 = Attention(\n query_dim=dim,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n cross_attention_dim=cross_attention_dim if only_cross_attention else None,\n upcast_attention=upcast_attention,\n )\n\n self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)\n\n # 2. Cross-Attn\n if cross_attention_dim is not None or double_self_attention:\n self.attn2 = Attention(\n query_dim=dim,\n cross_attention_dim=cross_attention_dim if not double_self_attention else None,\n heads=num_attention_heads,\n dim_head=attention_head_dim,\n dropout=dropout,\n bias=attention_bias,\n upcast_attention=upcast_attention,\n ) # is self-attn if encoder_hidden_states is none\n else:\n self.attn2 = None\n\n if self.use_ada_layer_norm:\n self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)\n elif self.use_ada_layer_norm_zero:\n self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)\n else:\n self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n\n if cross_attention_dim is not None or double_self_attention:\n # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.\n # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during\n # the second cross attention block.\n self.norm2 = (\n AdaLayerNorm(dim, num_embeds_ada_norm)\n if self.use_ada_layer_norm\n else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n )\n else:\n self.norm2 = None\n\n # 3. 
Feed-forward\n self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n # print (f\"attn output {attn_output.shape}, hidden states {hidden_states.shape}\")\n hidden_states = attn_output + hidden_states\n\n # 3. Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states" }, { "identifier": "PatchEmbed", "path": "diffusers/src/diffusers/models/embeddings.py", "snippet": "class PatchEmbed(nn.Module):\n \"\"\"2D Image to Patch Embedding\"\"\"\n\n def __init__(\n self,\n height=224,\n width=224,\n patch_size=16,\n in_channels=3,\n embed_dim=768,\n layer_norm=False,\n flatten=True,\n bias=True,\n ):\n super().__init__()\n\n num_patches = (height // patch_size) * (width // patch_size)\n self.flatten = flatten\n self.layer_norm = layer_norm\n\n self.proj = nn.Conv2d(\n in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias\n )\n if layer_norm:\n self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)\n else:\n self.norm = None\n\n pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5))\n self.register_buffer(\"pos_embed\", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)\n\n def forward(self, latent):\n latent = self.proj(latent)\n if self.flatten:\n latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC\n if self.layer_norm:\n latent = self.norm(latent)\n return latent + self.pos_embed" }, { "identifier": "ModelMixin", "path": "diffusers/src/diffusers/models/modeling_utils.py", "snippet": "class ModelMixin(torch.nn.Module):\n r\"\"\"\n Base class for all models.\n\n [`ModelMixin`] takes care of storing the configuration of the models and handles methods for 
loading, downloading\n and saving models.\n\n - **config_name** ([`str`]) -- A filename under which the model should be stored when calling\n [`~models.ModelMixin.save_pretrained`].\n \"\"\"\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _supports_gradient_checkpointing = False\n\n def __init__(self):\n super().__init__()\n\n @property\n def is_gradient_checkpointing(self) -> bool:\n \"\"\"\n Whether gradient checkpointing is activated for this model or not.\n\n Note that in other frameworks this feature can be referred to as \"activation checkpointing\" or \"checkpoint\n activations\".\n \"\"\"\n return any(hasattr(m, \"gradient_checkpointing\") and m.gradient_checkpointing for m in self.modules())\n\n def enable_gradient_checkpointing(self):\n \"\"\"\n Activates gradient checkpointing for the current model.\n\n Note that in other frameworks this feature can be referred to as \"activation checkpointing\" or \"checkpoint\n activations\".\n \"\"\"\n if not self._supports_gradient_checkpointing:\n raise ValueError(f\"{self.__class__.__name__} does not support gradient checkpointing.\")\n self.apply(partial(self._set_gradient_checkpointing, value=True))\n\n def disable_gradient_checkpointing(self):\n \"\"\"\n Deactivates gradient checkpointing for the current model.\n\n Note that in other frameworks this feature can be referred to as \"activation checkpointing\" or \"checkpoint\n activations\".\n \"\"\"\n if self._supports_gradient_checkpointing:\n self.apply(partial(self._set_gradient_checkpointing, value=False))\n\n def set_use_memory_efficient_attention_xformers(\n self, valid: bool, attention_op: Optional[Callable] = None\n ) -> None:\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid, attention_op)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n for module in self.children():\n if isinstance(module, torch.nn.Module):\n fn_recursive_set_mem_eff(module)\n\n def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):\n r\"\"\"\n Enable memory efficient attention as implemented in xformers.\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference\n time. Speed up at training time is not guaranteed.\n\n Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention\n is used.\n\n Parameters:\n attention_op (`Callable`, *optional*):\n Override the default `None` operator for use as `op` argument to the\n [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)\n function of xFormers.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import UNet2DConditionModel\n >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n\n >>> model = UNet2DConditionModel.from_pretrained(\n ... \"stabilityai/stable-diffusion-2-1\", subfolder=\"unet\", torch_dtype=torch.float16\n ... 
)\n >>> model = model.to(\"cuda\")\n >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n ```\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(True, attention_op)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention as implemented in xformers.\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(False)\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n is_main_process: bool = True,\n save_function: Callable = None,\n safe_serialization: bool = False,\n variant: Optional[str] = None,\n ):\n \"\"\"\n Save a model and its configuration file to a directory, so that it can be re-loaded using the\n `[`~models.ModelMixin.from_pretrained`]` class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to which to save. Will be created if it doesn't exist.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. Useful when in distributed training like\n TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on\n the main process to avoid race conditions.\n save_function (`Callable`):\n The function to use to save the state dictionary. Useful on distributed training like TPUs when one\n need to replace `torch.save` by another method. Can be configured with the environment variable\n `DIFFUSERS_SAVE_MODE`.\n safe_serialization (`bool`, *optional*, defaults to `False`):\n Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).\n variant (`str`, *optional*):\n If specified, weights are saved in the format pytorch_model.<variant>.bin.\n \"\"\"\n if safe_serialization and not is_safetensors_available():\n raise ImportError(\"`safe_serialization` requires the `safetensors library: `pip install safetensors`.\")\n\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # Save the model\n state_dict = model_to_save.state_dict()\n\n weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME\n weights_name = _add_variant(weights_name, variant)\n\n # Save the model\n if safe_serialization:\n safetensors.torch.save_file(\n state_dict, os.path.join(save_directory, weights_name), metadata={\"format\": \"pt\"}\n )\n else:\n torch.save(state_dict, os.path.join(save_directory, weights_name))\n\n logger.info(f\"Model weights saved in {os.path.join(save_directory, weights_name)}\")\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):\n r\"\"\"\n Instantiate a pretrained pytorch model from a pre-trained model configuration.\n\n The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train\n the model, you should first set it back in training mode with `model.train()`.\n\n The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come\n pretrained with the rest of the model. 
It is up to you to train those weights with a downstream fine-tuning\n task.\n\n The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those\n weights are discarded.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids should have an organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~ModelMixin.save_config`], e.g.,\n `./my_model_directory/`.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n torch_dtype (`str` or `torch.dtype`, *optional*):\n Override the default `torch.dtype` and load the model under this dtype. If `\"auto\"` is passed the dtype\n will be automatically derived from the model's weights.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `diffusers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n from_flax (`bool`, *optional*, defaults to `False`):\n Load the model weights from a Flax checkpoint save file.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n\n mirror (`str`, *optional*):\n Mirror source to accelerate downloads in China. If you are from China and have an accessibility\n problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.\n Please refer to the mirror site for more information.\n device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):\n A map that specifies where each submodule should go. It doesn't need to be refined to each\n parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the\n same device.\n\n To have Accelerate compute the most optimized `device_map` automatically, set `device_map=\"auto\"`. 
For\n more information about each option see [designing a device\n map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).\n low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):\n Speed up model loading by not initializing the weights and only loading the pre-trained weights. This\n also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the\n model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,\n setting this argument to `True` will raise an error.\n variant (`str`, *optional*):\n If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is\n ignored when using `from_flax`.\n use_safetensors (`bool`, *optional* ):\n If set to `True`, the pipeline will forcibly load the models from `safetensors` weights. If set to\n `None` (the default). The pipeline will load using `safetensors` if safetensors weights are available\n *and* if `safetensors` is installed. If the to `False` the pipeline will *not* use `safetensors`.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use\n this method in a firewalled environment.\n\n </Tip>\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n ignore_mismatched_sizes = kwargs.pop(\"ignore_mismatched_sizes\", False)\n force_download = kwargs.pop(\"force_download\", False)\n from_flax = kwargs.pop(\"from_flax\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n device_map = kwargs.pop(\"device_map\", None)\n low_cpu_mem_usage = kwargs.pop(\"low_cpu_mem_usage\", _LOW_CPU_MEM_USAGE_DEFAULT)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n\n if use_safetensors and not is_safetensors_available():\n raise ValueError(\n \"`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetenstors\"\n )\n\n allow_pickle = False\n if use_safetensors is None:\n use_safetensors = is_safetensors_available()\n allow_pickle = True\n\n if low_cpu_mem_usage and not is_accelerate_available():\n low_cpu_mem_usage = False\n logger.warning(\n \"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the\"\n \" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install\"\n \" `accelerate` for faster and less memory-intense model loading. You can do so with: \\n```\\npip\"\n \" install accelerate\\n```\\n.\"\n )\n\n if device_map is not None and not is_accelerate_available():\n raise NotImplementedError(\n \"Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set\"\n \" `device_map=None`. 
You can install accelerate with `pip install accelerate`.\"\n )\n\n # Check if we can handle device_map and dispatching the weights\n if device_map is not None and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `device_map=None`.\"\n )\n\n if low_cpu_mem_usage is True and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `low_cpu_mem_usage=False`.\"\n )\n\n if low_cpu_mem_usage is False and device_map is not None:\n raise ValueError(\n f\"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and\"\n \" dispatching. Please make sure to set `low_cpu_mem_usage=True`.\"\n )\n\n # Load config if we don't provide a configuration\n config_path = pretrained_model_name_or_path\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"pytorch\",\n }\n\n # load config\n config, unused_kwargs, commit_hash = cls.load_config(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n return_commit_hash=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n device_map=device_map,\n user_agent=user_agent,\n **kwargs,\n )\n\n # load model\n model_file = None\n if from_flax:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=FLAX_WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n model = cls.from_config(config, **unused_kwargs)\n\n # Convert the weights\n from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model\n\n model = load_flax_checkpoint_in_pytorch_model(model, model_file)\n else:\n if use_safetensors:\n try:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n except IOError as e:\n if not allow_pickle:\n raise e\n pass\n if model_file is None:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n\n if low_cpu_mem_usage:\n # Instantiate model with empty weights\n with accelerate.init_empty_weights():\n model = cls.from_config(config, **unused_kwargs)\n\n # if device_map is None, load the state dict and move the params from meta device to the cpu\n if device_map is None:\n param_device = \"cpu\"\n state_dict = load_state_dict(model_file, variant=variant)\n # move the params from meta device to cpu\n missing_keys = 
set(model.state_dict().keys()) - set(state_dict.keys())\n if len(missing_keys) > 0:\n raise ValueError(\n f\"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are\"\n f\" missing: \\n {', '.join(missing_keys)}. \\n Please make sure to pass\"\n \" `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize\"\n \" those weights or else make sure your checkpoint file is correct.\"\n )\n\n empty_state_dict = model.state_dict()\n for param_name, param in state_dict.items():\n accepts_dtype = \"dtype\" in set(\n inspect.signature(set_module_tensor_to_device).parameters.keys()\n )\n\n if empty_state_dict[param_name].shape != param.shape:\n raise ValueError(\n f\"Cannot load {pretrained_model_name_or_path} because {param_name} expected shape {empty_state_dict[param_name]}, but got {param.shape}. If you want to instead overwrite randomly initialized weights, please make sure to pass both `low_cpu_mem_usage=False` and `ignore_mismatched_sizes=True`. For more information, see also: https://github.com/huggingface/diffusers/issues/1619#issuecomment-1345604389 as an example.\"\n )\n\n if accepts_dtype:\n set_module_tensor_to_device(\n model, param_name, param_device, value=param, dtype=torch_dtype\n )\n else:\n set_module_tensor_to_device(model, param_name, param_device, value=param)\n else: # else let accelerate handle loading and dispatching.\n # Load weights and dispatch according to the device_map\n # by default the device_map is None and the weights are loaded on the CPU\n accelerate.load_checkpoint_and_dispatch(model, model_file, device_map, dtype=torch_dtype)\n\n loading_info = {\n \"missing_keys\": [],\n \"unexpected_keys\": [],\n \"mismatched_keys\": [],\n \"error_msgs\": [],\n }\n else:\n model = cls.from_config(config, **unused_kwargs)\n\n state_dict = load_state_dict(model_file, variant=variant)\n\n model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(\n model,\n state_dict,\n model_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=ignore_mismatched_sizes,\n )\n\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"mismatched_keys\": mismatched_keys,\n \"error_msgs\": error_msgs,\n }\n\n if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):\n raise ValueError(\n f\"{torch_dtype} needs to be of type `torch.dtype`, e.g. 
`torch.float16`, but is {type(torch_dtype)}.\"\n )\n elif torch_dtype is not None:\n model = model.to(torch_dtype)\n\n model.register_to_config(_name_or_path=pretrained_model_name_or_path)\n\n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n if output_loading_info:\n return model, loading_info\n\n return model\n\n @classmethod\n def _load_pretrained_model(\n cls,\n model,\n state_dict,\n resolved_archive_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=False,\n ):\n # Retrieve missing & unexpected_keys\n model_state_dict = model.state_dict()\n loaded_keys = list(state_dict.keys())\n\n expected_keys = list(model_state_dict.keys())\n\n original_loaded_keys = loaded_keys\n\n missing_keys = list(set(expected_keys) - set(loaded_keys))\n unexpected_keys = list(set(loaded_keys) - set(expected_keys))\n\n # Make sure we are able to load base models as well as derived models (with heads)\n model_to_load = model\n\n def _find_mismatched_keys(\n state_dict,\n model_state_dict,\n loaded_keys,\n ignore_mismatched_sizes,\n ):\n mismatched_keys = []\n if ignore_mismatched_sizes:\n for checkpoint_key in loaded_keys:\n model_key = checkpoint_key\n\n if (\n model_key in model_state_dict\n and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape\n ):\n mismatched_keys.append(\n (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)\n )\n del state_dict[checkpoint_key]\n return mismatched_keys\n\n if state_dict is not None:\n # Whole checkpoint\n mismatched_keys = _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n ignore_mismatched_sizes,\n )\n error_msgs = _load_state_dict_into_model(model_to_load, state_dict)\n\n if len(error_msgs) > 0:\n error_msg = \"\\n\\t\".join(error_msgs)\n if \"size mismatch\" in error_msg:\n error_msg += (\n \"\\n\\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.\"\n )\n raise RuntimeError(f\"Error(s) in loading state_dict for {model.__class__.__name__}:\\n\\t{error_msg}\")\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task\"\n \" or with another architecture (e.g. 
initializing a BertForSequenceClassification model from a\"\n \" BertForPreTraining model).\\n- This IS NOT expected if you are initializing\"\n f\" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly\"\n \" identical (initializing a BertForSequenceClassification model from a\"\n \" BertForSequenceClassification model).\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n elif len(mismatched_keys) == 0:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the\"\n f\" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions\"\n \" without further training.\"\n )\n if len(mismatched_keys) > 0:\n mismatched_warning = \"\\n\".join(\n [\n f\"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated\"\n for key, shape1, shape2 in mismatched_keys\n ]\n )\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized because the shapes did not\"\n f\" match:\\n{mismatched_warning}\\nYou should probably TRAIN this model on a down-stream task to be\"\n \" able to use it for predictions and inference.\"\n )\n\n return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs\n\n @property\n def device(self) -> device:\n \"\"\"\n `torch.device`: The device on which the module is (assuming that all the module parameters are on the same\n device).\n \"\"\"\n return get_parameter_device(self)\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).\n \"\"\"\n return get_parameter_dtype(self)\n\n def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:\n \"\"\"\n Get number of (optionally, trainable or non-embeddings) parameters in the module.\n\n Args:\n only_trainable (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of trainable parameters\n\n exclude_embeddings (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of non-embeddings parameters\n\n Returns:\n `int`: The number of parameters.\n \"\"\"\n\n if exclude_embeddings:\n embedding_param_names = [\n f\"{name}.weight\"\n for name, module_type in self.named_modules()\n if isinstance(module_type, torch.nn.Embedding)\n ]\n non_embedding_parameters = [\n parameter for name, parameter in self.named_parameters() if name not in embedding_param_names\n ]\n return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)\n else:\n return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)" } ]
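The `context` field above is a list of `{"identifier", "path", "snippet"}` records retrieved from elsewhere in the repository; `gold_snippet_index` (see the schema) appears to point at the one snippet the target completion actually depends on. A minimal sketch under that assumption:

```python
def gold_snippet(row: dict) -> str:
    """Return the gold cross-file snippet of a parsed row.

    Assumes `gold_snippet_index` indexes into `context`; this is an
    interpretation of the schema, not documented behavior.
    """
    entry = row["context"][row["gold_snippet_index"]]
    return f'# from {entry["path"]} ({entry["identifier"]})\n{entry["snippet"]}'
```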
from dataclasses import dataclass
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..models.embeddings import ImagePositionalEmbeddings
from ..utils import BaseOutput, deprecate
from .attention import BasicTransformerBlock
from .embeddings import PatchEmbed
from .modeling_utils import ModelMixin
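The `register_to_config` / `ConfigMixin` / `ModelMixin` trio imported above is the pattern the class below relies on. A minimal sketch of that pattern, separate from the record (the class name `TinyModel` and the `hidden_dim` parameter are illustrative assumptions):

import torch
from torch import nn
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin

class TinyModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, hidden_dim: int = 8):
        super().__init__()
        self.proj = nn.Linear(hidden_dim, hidden_dim)

model = TinyModel(hidden_dim=16)
print(model.config.hidden_dim)  # 16: the decorator captured the __init__ kwargs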
18,093
class Transformer2DModel(ModelMixin, ConfigMixin):
    """
    Transformer model for image-like data. Takes either discrete (classes of vector embeddings) or continuous (actual
    embeddings) inputs.

    When input is continuous: First, project the input (aka embedding) and reshape to b, t, d. Then apply standard
    transformer action. Finally, reshape to image.

    When input is discrete: First, input (classes of latent pixels) is converted to embeddings and has positional
    embeddings applied, see `ImagePositionalEmbeddings`. Then apply standard transformer action. Finally, predict
    classes of unnoised image. Note that it is assumed one of the input classes is the masked latent pixel. The
    predicted classes of the unnoised image do not contain a prediction for the masked pixel as the unnoised image
    cannot be masked.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
            `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during
            training. Note that this is fixed at training time as it is used to learn a number of embeddings that are
            added to the hidden states. During inference, you can denoise for up to but not more than
            `num_embeds_ada_norm` steps.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        patch_size: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        norm_type: str = "layer_norm",
        norm_elementwise_affine: bool = True,
    ):
        super().__init__()
        self.use_linear_projection = use_linear_projection
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        # 1.
Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
        # Define whether input is continuous or discrete depending on configuration
        self.is_input_continuous = (in_channels is not None) and (patch_size is None)
        self.is_input_vectorized = num_vector_embeds is not None
        self.is_input_patches = in_channels is not None and patch_size is not None

        if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
            deprecation_message = (
                f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
                " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config."
                " Please make sure to update the config accordingly, as leaving `norm_type` unset might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the `transformer/config.json` file"
            )
            deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
            norm_type = "ada_norm"

        if self.is_input_continuous and self.is_input_vectorized:
            raise ValueError(
                f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is None."
            )
        elif self.is_input_vectorized and self.is_input_patches:
            raise ValueError(
                f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
                " sure that either `num_vector_embeds` or `patch_size` is None."
            )
        elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
            raise ValueError(
                f"One of `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or `patch_size`:"
                f" {patch_size} has to be defined. Make sure that `in_channels`, `num_vector_embeds`, or `patch_size`"
                " is not None."
            )

        # 2. Define input layers
        if self.is_input_continuous:
            self.in_channels = in_channels

            self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
            if use_linear_projection:
                self.proj_in = nn.Linear(in_channels, inner_dim)
            else:
                self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
        elif self.is_input_vectorized:
            assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
            assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed"

            self.height = sample_size
            self.width = sample_size
            self.num_vector_embeds = num_vector_embeds
            self.num_latent_pixels = self.height * self.width
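The configuration logic above already pins down how the continuous path is exercised. A minimal usage sketch, not part of the record (argument values and the top-level `diffusers` import path are illustrative assumptions for a diffusers release contemporary with this file):

import torch
from diffusers import Transformer2DModel

# Continuous mode: `in_channels` is set and `patch_size` is None.
model = Transformer2DModel(
    num_attention_heads=2,
    attention_head_dim=8,   # inner_dim = 2 * 8 = 16
    in_channels=32,         # must be divisible by norm_num_groups (default 32)
    num_layers=1,
)
x = torch.randn(1, 32, 16, 16)   # (batch, channels, height, width)
out = model(x).sample            # Transformer2DModelOutput.sample
print(out.shape)                 # torch.Size([1, 32, 16, 16]), same as the input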
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@dataclass
class Transformer2DModelOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or
            `(batch_size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
            Hidden states conditioned on `encoder_hidden_states` input. If discrete, returns probability distributions
            for the unnoised latent pixels.
    """

    sample: torch.FloatTensor


class Transformer2DModel(ModelMixin, ConfigMixin):
    """
    Transformer model for image-like data. Takes either discrete (classes of vector embeddings) or continuous (actual
    embeddings) inputs.

    When input is continuous: First, project the input (aka embedding) and reshape to b, t, d. Then apply standard
    transformer action. Finally, reshape to image.

    When input is discrete: First, input (classes of latent pixels) is converted to embeddings and has positional
    embeddings applied, see `ImagePositionalEmbeddings`. Then apply standard transformer action. Finally, predict
    classes of unnoised image. Note that it is assumed one of the input classes is the masked latent pixel. The
    predicted classes of the unnoised image do not contain a prediction for the masked pixel as the unnoised image
    cannot be masked.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
            `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            Pass if at least one of the norm_layers is `AdaLayerNorm`. The number of diffusion steps used during
            training. Note that this is fixed at training time as it is used to learn a number of embeddings that are
            added to the hidden states. During inference, you can denoise for up to but not more than
            `num_embeds_ada_norm` steps.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
""" @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", norm_elementwise_affine: bool = True, ): super().__init__() self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` # Define whether input is continuous or discrete depending on configuration self.is_input_continuous = (in_channels is not None) and (patch_size is None) self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if norm_type == "layer_norm" and num_embeds_ada_norm is not None: deprecation_message = ( f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config." " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it" " would be very nice if you could open a Pull request for the `transformer/config.json` file" ) deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) norm_type = "ada_norm" if self.is_input_continuous and self.is_input_vectorized: raise ValueError( f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" " sure that either `in_channels` or `num_vector_embeds` is None." ) elif self.is_input_vectorized and self.is_input_patches: raise ValueError( f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" " sure that either `num_vector_embeds` or `num_patches` is None." ) elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: raise ValueError( f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." ) # 2. 
Define input layers if self.is_input_continuous: self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) if use_linear_projection: self.proj_in = nn.Linear(in_channels, inner_dim) else: self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) elif self.is_input_vectorized: assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed" self.height = sample_size self.width = sample_size self.num_vector_embeds = num_vector_embeds self.num_latent_pixels = self.height * self.width
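The discrete branch above is driven by `sample_size` and `num_vector_embeds` instead of `in_channels`. A rough sketch of a discrete-mode call, again illustrative rather than taken from the record; it assumes the AdaLayerNorm path (triggered by `num_embeds_ada_norm`), a scalar timestep, and the log-probability output shape used by diffusers releases of this era:

import torch
from diffusers import Transformer2DModel

# Discrete mode: sample_size and num_vector_embeds set, in_channels left None.
model = Transformer2DModel(
    num_attention_heads=2,
    attention_head_dim=8,
    sample_size=8,            # 8 x 8 grid of latent pixels
    num_vector_embeds=10,     # 9 content classes + 1 masked class
    num_embeds_ada_norm=100,  # AdaLayerNorm over 100 diffusion steps
    num_layers=1,
)
latents = torch.randint(0, 10, (2, 64))              # class index per latent pixel
out = model(latents, timestep=torch.tensor(5)).sample
print(out.shape)  # torch.Size([2, 9, 64]): log-probs, masked class excluded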
self.latent_image_embedding = ImagePositionalEmbeddings(
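`next_line` above is the prefix the model is expected to complete. For reference, the upstream diffusers source continues this call as below (reconstructed from the diffusers codebase, not stored in the record):

self.latent_image_embedding = ImagePositionalEmbeddings(
    num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width
)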
2
2023-11-14 23:29:31+00:00
24k
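Taken together, the fields above form one next-line completion example: `cropped_code` is the visible prefix, `next_line` is the target, and `gold_snippet_index` points at the `context` entry (here index 2) whose snippet defines the completed identifier. A hedged sketch of how such a record might be scored; `generate` is a hypothetical stand-in for any code model:

def build_prompt(record: dict) -> str:
    # Prepend the gold cross-file snippet, then the imports, then the code prefix.
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    return "\n\n".join([gold, record["import_statement"], record["cropped_code"]])

def exact_match(record: dict, generate) -> bool:
    # Compare only the first generated line against the reference next line.
    prediction = generate(build_prompt(record)).splitlines()[0].strip()
    return prediction == record["next_line"].strip()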
BraveGroup/Drive-WM
src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_diffnext.py
[ { "identifier": "ConfigMixin", "path": "src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False`):\n Whether the `commit_hash` of the loaded configuration is returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheck out your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initialized to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: 
remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "ModelMixin", "path": "src/diffusers/models/modeling_utils.py", "snippet": "class ModelMixin(torch.nn.Module, PushToHubMixin):\n r\"\"\"\n Base class for all models.\n\n [`ModelMixin`] takes care of storing the model configuration and provides methods for loading, downloading and\n saving models.\n\n - **config_name** ([`str`]) -- Filename to save a model to when calling [`~models.ModelMixin.save_pretrained`].\n \"\"\"\n\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _supports_gradient_checkpointing = False\n _keys_to_ignore_on_load_unexpected = None\n _hf_peft_config_loaded = False\n\n def __init__(self):\n super().__init__()\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129 We need to overwrite\n __getattr__ here in addition so that we don't trigger `torch.nn.Module`'s __getattr__':\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 
'unet.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False, stacklevel=3)\n return self._internal_dict[name]\n\n # call PyTorch's https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n return super().__getattr__(name)\n\n @property\n def is_gradient_checkpointing(self) -> bool:\n \"\"\"\n Whether gradient checkpointing is activated for this model or not.\n \"\"\"\n return any(hasattr(m, \"gradient_checkpointing\") and m.gradient_checkpointing for m in self.modules())\n\n def enable_gradient_checkpointing(self) -> None:\n \"\"\"\n Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or\n *checkpoint activations* in other frameworks).\n \"\"\"\n if not self._supports_gradient_checkpointing:\n raise ValueError(f\"{self.__class__.__name__} does not support gradient checkpointing.\")\n self.apply(partial(self._set_gradient_checkpointing, value=True))\n\n def disable_gradient_checkpointing(self) -> None:\n \"\"\"\n Deactivates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or\n *checkpoint activations* in other frameworks).\n \"\"\"\n if self._supports_gradient_checkpointing:\n self.apply(partial(self._set_gradient_checkpointing, value=False))\n\n def set_use_memory_efficient_attention_xformers(\n self, valid: bool, attention_op: Optional[Callable] = None\n ) -> None:\n # Recursively walk through all the children.\n # Any child that exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid, attention_op)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n for module in self.children():\n if isinstance(module, torch.nn.Module):\n fn_recursive_set_mem_eff(module)\n\n def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None) -> None:\n r\"\"\"\n Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up during\n inference. Speed up during training is not guaranteed.\n\n <Tip warning={true}>\n\n ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes\n precedence.\n\n </Tip>\n\n Parameters:\n attention_op (`Callable`, *optional*):\n Override the default `None` operator for use as `op` argument to the\n [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)\n function of xFormers.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import UNet2DConditionModel\n >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n\n >>> model = UNet2DConditionModel.from_pretrained(\n ... \"stabilityai/stable-diffusion-2-1\", subfolder=\"unet\", torch_dtype=torch.float16\n ... 
)\n >>> model = model.to(\"cuda\")\n >>> model.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n ```\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(True, attention_op)\n\n def disable_xformers_memory_efficient_attention(self) -> None:\n r\"\"\"\n Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(False)\n\n def add_adapter(self, adapter_config, adapter_name: str = \"default\") -> None:\n r\"\"\"\n Adds a new adapter to the current model for training. If no adapter name is passed, a default name is assigned\n to the adapter to follow the convention of the PEFT library.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them in the PEFT\n [documentation](https://huggingface.co/docs/peft).\n\n Args:\n adapter_config (`[~peft.PeftConfig]`):\n The configuration of the adapter to add; supported adapters are non-prefix tuning and adaption prompt\n methods.\n adapter_name (`str`, *optional*, defaults to `\"default\"`):\n The name of the adapter to add. If no name is passed, a default name is assigned to the adapter.\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n from peft import PeftConfig, inject_adapter_in_model\n\n if not self._hf_peft_config_loaded:\n self._hf_peft_config_loaded = True\n elif adapter_name in self.peft_config:\n raise ValueError(f\"Adapter with name {adapter_name} already exists. Please use a different name.\")\n\n if not isinstance(adapter_config, PeftConfig):\n raise ValueError(\n f\"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.\"\n )\n\n # Unlike transformers, here we don't need to retrieve the name_or_path of the unet as the loading logic is\n # handled by the `load_lora_layers` or `LoraLoaderMixin`. Therefore we set it to `None` here.\n adapter_config.base_model_name_or_path = None\n inject_adapter_in_model(adapter_config, self, adapter_name)\n self.set_adapter(adapter_name)\n\n def set_adapter(self, adapter_name: Union[str, List[str]]) -> None:\n \"\"\"\n Sets a specific adapter by forcing the model to only use that adapter and disables the other adapters.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n\n Args:\n adapter_name (Union[str, List[str]])):\n The list of adapters to set or the adapter name in case of single adapter.\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n if isinstance(adapter_name, str):\n adapter_name = [adapter_name]\n\n missing = set(adapter_name) - set(self.peft_config)\n if len(missing) > 0:\n raise ValueError(\n f\"Following adapter(s) could not be found: {', '.join(missing)}. 
Make sure you are passing the correct adapter name(s).\"\n f\" currently loaded adapters are: {list(self.peft_config.keys())}\"\n )\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n _adapters_has_been_set = False\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"set_adapter\"):\n module.set_adapter(adapter_name)\n # Previous versions of PEFT do not support multi-adapter inference\n elif not hasattr(module, \"set_adapter\") and len(adapter_name) != 1:\n raise ValueError(\n \"You are trying to set multiple adapters and you have a PEFT version that does not support multi-adapter inference. Please upgrade to the latest version of PEFT.\"\n \" `pip install -U peft` or `pip install -U git+https://github.com/huggingface/peft.git`\"\n )\n else:\n module.active_adapter = adapter_name\n _adapters_has_been_set = True\n\n if not _adapters_has_been_set:\n raise ValueError(\n \"Did not succeed in setting the adapter. Please make sure you are using a model that supports adapters.\"\n )\n\n def disable_adapters(self) -> None:\n r\"\"\"\n Disable all adapters attached to the model and fall back to inference with the base model only.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"enable_adapters\"):\n module.enable_adapters(enabled=False)\n else:\n # support for older PEFT versions\n module.disable_adapters = True\n\n def enable_adapters(self) -> None:\n \"\"\"\n Enable adapters that are attached to the model. The model will use `self.active_adapters()` to retrieve the\n list of adapters to enable.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n if hasattr(module, \"enable_adapters\"):\n module.enable_adapters(enabled=True)\n else:\n # support for older PEFT versions\n module.disable_adapters = False\n\n def active_adapters(self) -> List[str]:\n \"\"\"\n Gets the current list of active adapters of the model.\n\n If you are not familiar with adapters and PEFT methods, we invite you to read more about them on the PEFT\n official documentation: https://huggingface.co/docs/peft\n \"\"\"\n check_peft_version(min_version=MIN_PEFT_VERSION)\n\n if not self._hf_peft_config_loaded:\n raise ValueError(\"No adapter loaded. 
Please load an adapter first.\")\n\n from peft.tuners.tuners_utils import BaseTunerLayer\n\n for _, module in self.named_modules():\n if isinstance(module, BaseTunerLayer):\n return module.active_adapter\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n is_main_process: bool = True,\n save_function: Optional[Callable] = None,\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save a model and its configuration file to a directory so that it can be reloaded using the\n [`~models.ModelMixin.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a model and its configuration file to. Will be created if it doesn't exist.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. Useful during distributed training and you\n need to call this function on all processes. In this case, set `is_main_process=True` only on the main\n process to avoid race conditions.\n save_function (`Callable`):\n The function to use to save the state dictionary. Useful during distributed training when you need to\n replace `torch.save` with another method. Can be configured with the environment variable\n `DIFFUSERS_SAVE_MODE`.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n # Only save the model itself if we are using distributed training\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # Save the model\n state_dict = model_to_save.state_dict()\n\n weights_name = SAFETENSORS_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME\n weights_name = _add_variant(weights_name, variant)\n\n # Save the model\n if safe_serialization:\n safetensors.torch.save_file(\n state_dict, os.path.join(save_directory, weights_name), metadata={\"format\": \"pt\"}\n )\n else:\n torch.save(state_dict, os.path.join(save_directory, weights_name))\n\n logger.info(f\"Model weights saved in {os.path.join(save_directory, weights_name)}\")\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: 
Optional[Union[str, os.PathLike]], **kwargs):\n r\"\"\"\n Instantiate a pretrained PyTorch model from a pretrained model configuration.\n\n The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To\n train the model, set it back in training mode with `model.train()`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`~ModelMixin.save_pretrained`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n torch_dtype (`str` or `torch.dtype`, *optional*):\n Override the default `torch.dtype` and load the model with another dtype. If `\"auto\"` is passed, the\n dtype is automatically derived from the model's weights.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info (`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n from_flax (`bool`, *optional*, defaults to `False`):\n Load the model weights from a Flax checkpoint save file.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you're downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):\n A map that specifies where each submodule should go. It doesn't need to be defined for each\n parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the\n same device.\n\n Set `device_map=\"auto\"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. 
For\n more information about each option see [designing a device\n map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).\n max_memory (`Dict`, *optional*):\n A dictionary mapping device identifiers to the maximum memory. Will default to the maximum memory available for\n each GPU and the available CPU RAM if unset.\n offload_folder (`str` or `os.PathLike`, *optional*):\n The path to offload weights if `device_map` contains the value `\"disk\"`.\n offload_state_dict (`bool`, *optional*):\n If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if\n the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`\n when there is some disk offload.\n low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):\n Speed up model loading by only loading the pretrained weights and not initializing the weights. This also\n tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.\n Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this\n argument to `True` will raise an error.\n variant (`str`, *optional*):\n Load weights from a specified `variant` filename such as `\"fp16\"` or `\"ema\"`. This is ignored when\n loading `from_flax`.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the\n `safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors`\n weights. If set to `False`, `safetensors` weights are not loaded.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log in with\n `huggingface-cli login`. 
You can also activate the special\n [\"offline-mode\"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a\n firewalled environment.\n\n </Tip>\n\n Example:\n\n ```py\n from diffusers import UNet2DConditionModel\n\n unet = UNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\", subfolder=\"unet\")\n ```\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```bash\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n ignore_mismatched_sizes = kwargs.pop(\"ignore_mismatched_sizes\", False)\n force_download = kwargs.pop(\"force_download\", False)\n from_flax = kwargs.pop(\"from_flax\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n device_map = kwargs.pop(\"device_map\", None)\n max_memory = kwargs.pop(\"max_memory\", None)\n offload_folder = kwargs.pop(\"offload_folder\", None)\n offload_state_dict = kwargs.pop(\"offload_state_dict\", False)\n low_cpu_mem_usage = kwargs.pop(\"low_cpu_mem_usage\", _LOW_CPU_MEM_USAGE_DEFAULT)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n\n allow_pickle = False\n if use_safetensors is None:\n use_safetensors = True\n allow_pickle = True\n\n if low_cpu_mem_usage and not is_accelerate_available():\n low_cpu_mem_usage = False\n logger.warning(\n \"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the\"\n \" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install\"\n \" `accelerate` for faster and less memory-intense model loading. You can do so with: \\n```\\npip\"\n \" install accelerate\\n```\\n.\"\n )\n\n if device_map is not None and not is_accelerate_available():\n raise NotImplementedError(\n \"Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set\"\n \" `device_map=None`. You can install accelerate with `pip install accelerate`.\"\n )\n\n # Check if we can handle device_map and dispatching the weights\n if device_map is not None and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `device_map=None`.\"\n )\n\n if low_cpu_mem_usage is True and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Low memory initialization requires torch >= 1.9.0. 
Please either update your PyTorch version or set\"\n \" `low_cpu_mem_usage=False`.\"\n )\n\n if low_cpu_mem_usage is False and device_map is not None:\n raise ValueError(\n f\"You cannot set `low_cpu_mem_usage` to `False` while using device_map={device_map} for loading and\"\n \" dispatching. Please make sure to set `low_cpu_mem_usage=True`.\"\n )\n\n # Load config if we don't provide a configuration\n config_path = pretrained_model_name_or_path\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"pytorch\",\n }\n\n # load config\n config, unused_kwargs, commit_hash = cls.load_config(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n return_commit_hash=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n device_map=device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n user_agent=user_agent,\n **kwargs,\n )\n\n # load model\n model_file = None\n if from_flax:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=FLAX_WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n model = cls.from_config(config, **unused_kwargs)\n\n # Convert the weights\n from .modeling_pytorch_flax_utils import load_flax_checkpoint_in_pytorch_model\n\n model = load_flax_checkpoint_in_pytorch_model(model, model_file)\n else:\n if use_safetensors:\n try:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n except IOError as e:\n if not allow_pickle:\n raise e\n pass\n if model_file is None:\n model_file = _get_model_file(\n pretrained_model_name_or_path,\n weights_name=_add_variant(WEIGHTS_NAME, variant),\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n commit_hash=commit_hash,\n )\n\n if low_cpu_mem_usage:\n # Instantiate model with empty weights\n with accelerate.init_empty_weights():\n model = cls.from_config(config, **unused_kwargs)\n\n # if device_map is None, load the state dict and move the params from meta device to the cpu\n if device_map is None:\n param_device = \"cpu\"\n state_dict = load_state_dict(model_file, variant=variant)\n model._convert_deprecated_attention_blocks(state_dict)\n # move the params from meta device to cpu\n missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())\n if len(missing_keys) > 0:\n raise ValueError(\n f\"Cannot load {cls} from {pretrained_model_name_or_path} because the following keys are\"\n f\" missing: \\n {', '.join(missing_keys)}. 
\\n Please make sure to pass\"\n \" `low_cpu_mem_usage=False` and `device_map=None` if you want to randomly initialize\"\n \" those weights or else make sure your checkpoint file is correct.\"\n )\n\n unexpected_keys = load_model_dict_into_meta(\n model,\n state_dict,\n device=param_device,\n dtype=torch_dtype,\n model_name_or_path=pretrained_model_name_or_path,\n )\n\n if cls._keys_to_ignore_on_load_unexpected is not None:\n for pat in cls._keys_to_ignore_on_load_unexpected:\n unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]\n\n if len(unexpected_keys) > 0:\n logger.warn(\n f\"Some weights of the model checkpoint were not used when initializing {cls.__name__}: \\n {[', '.join(unexpected_keys)]}\"\n )\n\n else: # else let accelerate handle loading and dispatching.\n # Load weights and dispatch according to the device_map\n # by default the device_map is None and the weights are loaded on the CPU\n try:\n accelerate.load_checkpoint_and_dispatch(\n model,\n model_file,\n device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n dtype=torch_dtype,\n )\n except AttributeError as e:\n # When using accelerate loading, we do not have the ability to load the state\n # dict and rename the weight names manually. Additionally, accelerate skips\n # torch loading conventions and directly writes into `module.{_buffers, _parameters}`\n # (which look like they should be private variables?), so we can't use the standard hooks\n # to rename parameters on load. We need to mimic the original weight names so the correct\n # attributes are available. After we have loaded the weights, we convert the deprecated\n # names to the new non-deprecated names. Then we _greatly encourage_ the user to convert\n # the weights so we don't have to do this again.\n\n if \"'Attention' object has no attribute\" in str(e):\n logger.warn(\n f\"Taking `{str(e)}` while using `accelerate.load_checkpoint_and_dispatch` to mean {pretrained_model_name_or_path}\"\n \" was saved with deprecated attention block weight names. We will load it with the deprecated attention block\"\n \" names and convert them on the fly to the new attention block format. Please re-save the model after this conversion,\"\n \" so we don't have to do the on the fly renaming in the future. 
If the model is from a hub checkpoint,\"\n \" please also re-upload it or open a PR on the original repository.\"\n )\n model._temp_convert_self_to_deprecated_attention_blocks()\n accelerate.load_checkpoint_and_dispatch(\n model,\n model_file,\n device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n dtype=torch_dtype,\n )\n model._undo_temp_convert_self_to_deprecated_attention_blocks()\n else:\n raise e\n\n loading_info = {\n \"missing_keys\": [],\n \"unexpected_keys\": [],\n \"mismatched_keys\": [],\n \"error_msgs\": [],\n }\n else:\n model = cls.from_config(config, **unused_kwargs)\n\n state_dict = load_state_dict(model_file, variant=variant)\n model._convert_deprecated_attention_blocks(state_dict)\n\n model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(\n model,\n state_dict,\n model_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=ignore_mismatched_sizes,\n )\n\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"mismatched_keys\": mismatched_keys,\n \"error_msgs\": error_msgs,\n }\n\n if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype):\n raise ValueError(\n f\"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}.\"\n )\n elif torch_dtype is not None:\n model = model.to(torch_dtype)\n\n model.register_to_config(_name_or_path=pretrained_model_name_or_path)\n\n # Set model in evaluation mode to deactivate DropOut modules by default\n model.eval()\n if output_loading_info:\n return model, loading_info\n\n return model\n\n @classmethod\n def _load_pretrained_model(\n cls,\n model,\n state_dict: OrderedDict,\n resolved_archive_file,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n ignore_mismatched_sizes: bool = False,\n ):\n # Retrieve missing & unexpected_keys\n model_state_dict = model.state_dict()\n loaded_keys = list(state_dict.keys())\n\n expected_keys = list(model_state_dict.keys())\n\n original_loaded_keys = loaded_keys\n\n missing_keys = list(set(expected_keys) - set(loaded_keys))\n unexpected_keys = list(set(loaded_keys) - set(expected_keys))\n\n # Make sure we are able to load base models as well as derived models (with heads)\n model_to_load = model\n\n def _find_mismatched_keys(\n state_dict,\n model_state_dict,\n loaded_keys,\n ignore_mismatched_sizes,\n ):\n mismatched_keys = []\n if ignore_mismatched_sizes:\n for checkpoint_key in loaded_keys:\n model_key = checkpoint_key\n\n if (\n model_key in model_state_dict\n and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape\n ):\n mismatched_keys.append(\n (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)\n )\n del state_dict[checkpoint_key]\n return mismatched_keys\n\n if state_dict is not None:\n # Whole checkpoint\n mismatched_keys = _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n ignore_mismatched_sizes,\n )\n error_msgs = _load_state_dict_into_model(model_to_load, state_dict)\n\n if len(error_msgs) > 0:\n error_msg = \"\\n\\t\".join(error_msgs)\n if \"size mismatch\" in error_msg:\n error_msg += (\n \"\\n\\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.\"\n )\n raise RuntimeError(f\"Error(s) in loading state_dict for {model.__class__.__name__}:\\n\\t{error_msg}\")\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at 
{pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task\"\n \" or with another architecture (e.g. initializing a BertForSequenceClassification model from a\"\n \" BertForPreTraining model).\\n- This IS NOT expected if you are initializing\"\n f\" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly\"\n \" identical (initializing a BertForSequenceClassification model from a\"\n \" BertForSequenceClassification model).\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n elif len(mismatched_keys) == 0:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the\"\n f\" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions\"\n \" without further training.\"\n )\n if len(mismatched_keys) > 0:\n mismatched_warning = \"\\n\".join(\n [\n f\"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated\"\n for key, shape1, shape2 in mismatched_keys\n ]\n )\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized because the shapes did not\"\n f\" match:\\n{mismatched_warning}\\nYou should probably TRAIN this model on a down-stream task to be\"\n \" able to use it for predictions and inference.\"\n )\n\n return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n `torch.device`: The device on which the module is (assuming that all the module parameters are on the same\n device).\n \"\"\"\n return get_parameter_device(self)\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).\n \"\"\"\n return get_parameter_dtype(self)\n\n def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:\n \"\"\"\n Get number of (trainable or non-embedding) parameters in the module.\n\n Args:\n only_trainable (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of trainable parameters.\n exclude_embeddings (`bool`, *optional*, defaults to `False`):\n Whether or not to return only the number of non-embedding parameters.\n\n Returns:\n `int`: The number of parameters.\n\n Example:\n\n ```py\n from diffusers import UNet2DConditionModel\n\n model_id = \"runwayml/stable-diffusion-v1-5\"\n unet = UNet2DConditionModel.from_pretrained(model_id, subfolder=\"unet\")\n unet.num_parameters(only_trainable=True)\n 859520964\n ```\n \"\"\"\n\n if exclude_embeddings:\n embedding_param_names = [\n f\"{name}.weight\"\n for name, module_type in self.named_modules()\n if isinstance(module_type, torch.nn.Embedding)\n ]\n 
non_embedding_parameters = [\n parameter for name, parameter in self.named_parameters() if name not in embedding_param_names\n ]\n return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)\n else:\n return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)\n\n def _convert_deprecated_attention_blocks(self, state_dict: OrderedDict) -> None:\n deprecated_attention_block_paths = []\n\n def recursive_find_attn_block(name, module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_paths.append(name)\n\n for sub_name, sub_module in module.named_children():\n sub_name = sub_name if name == \"\" else f\"{name}.{sub_name}\"\n recursive_find_attn_block(sub_name, sub_module)\n\n recursive_find_attn_block(\"\", self)\n\n # NOTE: we have to check if the deprecated parameters are in the state dict\n # because it is possible we are loading from a state dict that was already\n # converted\n\n for path in deprecated_attention_block_paths:\n # group_norm path stays the same\n\n # query -> to_q\n if f\"{path}.query.weight\" in state_dict:\n state_dict[f\"{path}.to_q.weight\"] = state_dict.pop(f\"{path}.query.weight\")\n if f\"{path}.query.bias\" in state_dict:\n state_dict[f\"{path}.to_q.bias\"] = state_dict.pop(f\"{path}.query.bias\")\n\n # key -> to_k\n if f\"{path}.key.weight\" in state_dict:\n state_dict[f\"{path}.to_k.weight\"] = state_dict.pop(f\"{path}.key.weight\")\n if f\"{path}.key.bias\" in state_dict:\n state_dict[f\"{path}.to_k.bias\"] = state_dict.pop(f\"{path}.key.bias\")\n\n # value -> to_v\n if f\"{path}.value.weight\" in state_dict:\n state_dict[f\"{path}.to_v.weight\"] = state_dict.pop(f\"{path}.value.weight\")\n if f\"{path}.value.bias\" in state_dict:\n state_dict[f\"{path}.to_v.bias\"] = state_dict.pop(f\"{path}.value.bias\")\n\n # proj_attn -> to_out.0\n if f\"{path}.proj_attn.weight\" in state_dict:\n state_dict[f\"{path}.to_out.0.weight\"] = state_dict.pop(f\"{path}.proj_attn.weight\")\n if f\"{path}.proj_attn.bias\" in state_dict:\n state_dict[f\"{path}.to_out.0.bias\"] = state_dict.pop(f\"{path}.proj_attn.bias\")\n\n def _temp_convert_self_to_deprecated_attention_blocks(self) -> None:\n deprecated_attention_block_modules = []\n\n def recursive_find_attn_block(module):\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_modules.append(module)\n\n for sub_module in module.children():\n recursive_find_attn_block(sub_module)\n\n recursive_find_attn_block(self)\n\n for module in deprecated_attention_block_modules:\n module.query = module.to_q\n module.key = module.to_k\n module.value = module.to_v\n module.proj_attn = module.to_out[0]\n\n # We don't _have_ to delete the old attributes, but it's helpful to ensure\n # that _all_ the weights are loaded into the new attributes and we're not\n # making an incorrect assumption that this model should be converted when\n # it really shouldn't be.\n del module.to_q\n del module.to_k\n del module.to_v\n del module.to_out\n\n def _undo_temp_convert_self_to_deprecated_attention_blocks(self) -> None:\n deprecated_attention_block_modules = []\n\n def recursive_find_attn_block(module) -> None:\n if hasattr(module, \"_from_deprecated_attn_block\") and module._from_deprecated_attn_block:\n deprecated_attention_block_modules.append(module)\n\n for sub_module in module.children():\n recursive_find_attn_block(sub_module)\n\n 
recursive_find_attn_block(self)\n\n for module in deprecated_attention_block_modules:\n module.to_q = module.query\n module.to_k = module.key\n module.to_v = module.value\n module.to_out = nn.ModuleList([module.proj_attn, nn.Dropout(module.dropout)])\n\n del module.query\n del module.key\n del module.value\n del module.proj_attn" }, { "identifier": "AttnBlock", "path": "src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py", "snippet": "class AttnBlock(nn.Module):\n def __init__(self, c, c_cond, nhead, self_attn=True, dropout=0.0):\n super().__init__()\n\n linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear\n\n self.self_attn = self_attn\n self.norm = WuerstchenLayerNorm(c, elementwise_affine=False, eps=1e-6)\n self.attention = Attention(query_dim=c, heads=nhead, dim_head=c // nhead, dropout=dropout, bias=True)\n self.kv_mapper = nn.Sequential(nn.SiLU(), linear_cls(c_cond, c))\n\n def forward(self, x, kv):\n kv = self.kv_mapper(kv)\n norm_x = self.norm(x)\n if self.self_attn:\n batch_size, channel, _, _ = x.shape\n kv = torch.cat([norm_x.view(batch_size, channel, -1).transpose(1, 2), kv], dim=1)\n x = x + self.attention(norm_x, encoder_hidden_states=kv)\n return x" }, { "identifier": "GlobalResponseNorm", "path": "src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py", "snippet": "class GlobalResponseNorm(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))\n self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))\n\n def forward(self, x):\n agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True)\n stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6)\n return self.gamma * (x * stand_div_norm) + self.beta + x" }, { "identifier": "TimestepBlock", "path": "src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py", "snippet": "class TimestepBlock(nn.Module):\n def __init__(self, c, c_timestep):\n super().__init__()\n linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear\n self.mapper = linear_cls(c_timestep, c * 2)\n\n def forward(self, x, t):\n a, b = self.mapper(t)[:, :, None, None].chunk(2, dim=1)\n return x * (1 + a) + b" }, { "identifier": "WuerstchenLayerNorm", "path": "src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py", "snippet": "class WuerstchenLayerNorm(nn.LayerNorm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def forward(self, x):\n x = x.permute(0, 2, 3, 1)\n x = super().forward(x)\n return x.permute(0, 3, 1, 2)" } ]
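For reference, a minimal runnable sketch of the `GlobalResponseNorm` block defined in the context above. The input layout is an assumption: the `(1, 1, 1, dim)` gamma/beta parameters only broadcast correctly if the input is channels-last, `(B, H, W, C)`.

```py
# Self-contained sketch of GlobalResponseNorm as shown in the snippet above.
# Assumes channels-last input (B, H, W, C) so gamma/beta of shape (1, 1, 1, dim)
# broadcast over the batch and spatial dimensions.
import torch
import torch.nn as nn

class GlobalResponseNorm(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, x):
        # L2 norm over the spatial dims, then divide by its mean over channels
        agg_norm = torch.norm(x, p=2, dim=(1, 2), keepdim=True)       # (B, 1, 1, C)
        stand_div_norm = agg_norm / (agg_norm.mean(dim=-1, keepdim=True) + 1e-6)
        return self.gamma * (x * stand_div_norm) + self.beta + x

x = torch.randn(2, 8, 8, 64)                 # (B, H, W, C)
print(GlobalResponseNorm(64)(x).shape)       # torch.Size([2, 8, 8, 64])
```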
import math import numpy as np import torch import torch.nn as nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin from .modeling_wuerstchen_common import AttnBlock, GlobalResponseNorm, TimestepBlock, WuerstchenLayerNorm
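As a quick sanity check on the `TimestepBlock` imported above, here is a hedged sketch of its scale/shift conditioning; the dimensions (`c=320`, `c_timestep=64`) are illustrative assumptions, not values taken from a real config.

```py
# Sketch of the TimestepBlock modulation from the context snippet: a linear
# layer maps the timestep embedding to 2*c channels, split into scale a and
# shift b, applied as x * (1 + a) + b (FiLM-style conditioning).
import torch
import torch.nn as nn

c, c_timestep = 320, 64                      # illustrative sizes
mapper = nn.Linear(c_timestep, c * 2)

x = torch.randn(4, c, 32, 32)                # feature map (B, C, H, W)
t = torch.randn(4, c_timestep)               # timestep embedding
a, b = mapper(t)[:, :, None, None].chunk(2, dim=1)   # each (B, C, 1, 1)
out = x * (1 + a) + b
print(out.shape)                             # torch.Size([4, 320, 32, 32])
```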
18014
# Copyright (c) 2023 Dominic Rampas MIT License # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__( self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=["CT", "CTA", "CTA", "CTA"], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1, ): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden) # CONDITIONING self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList( [ nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet)) ] ) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1),
# Copyright (c) 2023 Dominic Rampas MIT License # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class WuerstchenDiffNeXt(ModelMixin, ConfigMixin): @register_to_config def __init__( self, c_in=4, c_out=4, c_r=64, patch_size=2, c_cond=1024, c_hidden=[320, 640, 1280, 1280], nhead=[-1, 10, 20, 20], blocks=[4, 4, 14, 4], level_config=["CT", "CTA", "CTA", "CTA"], inject_effnet=[False, True, True, True], effnet_embd=16, clip_embd=1024, kernel_size=3, dropout=0.1, ): super().__init__() self.c_r = c_r self.c_cond = c_cond if not isinstance(dropout, list): dropout = [dropout] * len(c_hidden) # CONDITIONING self.clip_mapper = nn.Linear(clip_embd, c_cond) self.effnet_mappers = nn.ModuleList( [ nn.Conv2d(effnet_embd, c_cond, kernel_size=1) if inject else None for inject in inject_effnet + list(reversed(inject_effnet)) ] ) self.seq_norm = nn.LayerNorm(c_cond, elementwise_affine=False, eps=1e-6) self.embedding = nn.Sequential( nn.PixelUnshuffle(patch_size), nn.Conv2d(c_in * (patch_size**2), c_hidden[0], kernel_size=1),
WuerstchenLayerNorm(c_hidden[0], elementwise_affine=False, eps=1e-6),
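Putting the cropped code and the gold next line together, the embedding stem is PixelUnshuffle -> 1x1 Conv -> channels-last LayerNorm. Below is a minimal shape-check sketch using the defaults from the `__init__` above (`c_in=4`, `patch_size=2`, `c_hidden[0]=320`):

```py
# Shape check for the embedding Sequential completed by the gold next_line.
# PixelUnshuffle(p) folds each p x p patch into channels, the 1x1 conv projects
# to c_hidden[0], and WuerstchenLayerNorm normalizes in channels-last layout.
import torch
import torch.nn as nn

class WuerstchenLayerNorm(nn.LayerNorm):
    def forward(self, x):
        x = x.permute(0, 2, 3, 1)            # (B, C, H, W) -> (B, H, W, C)
        x = super().forward(x)
        return x.permute(0, 3, 1, 2)         # back to (B, C, H, W)

c_in, patch_size, c_hidden0 = 4, 2, 320      # defaults from the config above
embedding = nn.Sequential(
    nn.PixelUnshuffle(patch_size),
    nn.Conv2d(c_in * (patch_size**2), c_hidden0, kernel_size=1),
    WuerstchenLayerNorm(c_hidden0, elementwise_affine=False, eps=1e-6),
)

x = torch.randn(1, c_in, 64, 64)
print(embedding(x).shape)                    # torch.Size([1, 320, 32, 32])
```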
6
2023-11-18 01:40:55+00:00
24k
wjun0830/CGDETR
cg_detr/train.py
[ { "identifier": "BaseOptions", "path": "cg_detr/config.py", "snippet": "class BaseOptions(object):\n saved_option_filename = \"opt.json\"\n ckpt_filename = \"model.ckpt\"\n tensorboard_log_dir = \"tensorboard_log\"\n train_log_filename = \"train.log.txt\"\n eval_log_filename = \"eval.log.txt\"\n\n def __init__(self):\n self.parser = None\n self.initialized = False\n self.opt = None\n\n def initialize(self):\n self.initialized = True\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dset_name\", type=str, choices=[\"hl\", 'tvsum', 'charadesSTA', 'tacos', 'nlq','youtube_uni'])\n parser.add_argument(\"--dset_domain\", type=str, \n help=\"Domain to train for tvsum dataset. (Only used for tvsum and youtube-hl)\")\n \n parser.add_argument(\"--eval_split_name\", type=str, default=\"val\",\n help=\"should match keys in video_duration_idx_path, must set for VCMR\")\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"debug (fast) mode, break all loops, do not load all data into memory.\")\n parser.add_argument(\"--data_ratio\", type=float, default=1.0,\n help=\"how many training and eval data to use. 1.0: use all, 0.1: use 10%.\"\n \"Use small portion for debug purposes. Note this is different from --debug, \"\n \"which works by breaking the loops, typically they are not used together.\")\n parser.add_argument(\"--results_root\", type=str, default=\"results\")\n parser.add_argument(\"--exp_id\", type=str, default=None, help=\"id of this run, required at training\")\n parser.add_argument(\"--seed\", type=int, default=2018, help=\"random seed\")\n parser.add_argument(\"--device\", type=int, default=0, help=\"0 cuda, -1 cpu\")\n parser.add_argument(\"--num_workers\", type=int, default=0,\n help=\"num subprocesses used to load the data, 0: use main process\")\n parser.add_argument(\"--no_pin_memory\", action=\"store_true\",\n help=\"Don't use pin_memory=True for dataloader. 
\"\n \"ref: https://discuss.pytorch.org/t/should-we-set-non-blocking-to-true/38234/4\")\n\n # training config\n parser.add_argument(\"--lr\", type=float, default=1e-4, help=\"learning rate\")\n parser.add_argument(\"--lr_drop\", type=int, default=400, help=\"drop learning rate to 1/10 every lr_drop epochs\")\n parser.add_argument(\"--wd\", type=float, default=1e-4, help=\"weight decay\")\n parser.add_argument(\"--n_epoch\", type=int, default=200, help=\"number of epochs to run\")\n parser.add_argument(\"--max_es_cnt\", type=int, default=200,\n help=\"number of epochs to early stop, use -1 to disable early stop\")\n parser.add_argument(\"--bsz\", type=int, default=32, help=\"mini-batch size\")\n parser.add_argument(\"--eval_bsz\", type=int, default=100,\n help=\"mini-batch size at inference, for query\")\n parser.add_argument(\"--eval_epoch\", type=int, default=5,\n help=\"inference epoch\")\n parser.add_argument(\"--grad_clip\", type=float, default=0.1, help=\"perform gradient clip, -1: disable\")\n parser.add_argument(\"--eval_untrained\", action=\"store_true\", help=\"Evaluate on un-trained model\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"checkpoint path to resume or evaluate, without --resume_all this only load weights\")\n parser.add_argument(\"--resume_all\", action=\"store_true\",\n help=\"if --resume_all, load optimizer/scheduler/epoch as well\")\n parser.add_argument(\"--start_epoch\", type=int, default=None,\n help=\"if None, will be set automatically when using --resume_all\")\n\n # Data config\n parser.add_argument(\"--max_q_l\", type=int, default=-1)\n parser.add_argument(\"--max_v_l\", type=int, default=-1)\n parser.add_argument(\"--clip_length\", type=float, default=2)\n parser.add_argument(\"--max_windows\", type=int, default=5)\n\n parser.add_argument(\"--train_path\", type=str, default=None)\n parser.add_argument(\"--eval_path\", type=str, default=None,\n help=\"Evaluating during training, for Dev set. If None, will only do training, \")\n parser.add_argument(\"--no_norm_vfeat\", action=\"store_true\", help=\"Do not do normalize video feat\")\n parser.add_argument(\"--no_norm_tfeat\", action=\"store_true\", help=\"Do not do normalize text feat\")\n parser.add_argument(\"--v_feat_dirs\", type=str, nargs=\"+\",\n help=\"video feature dirs. If more than one, will concat their features. 
\"\n \"Note that sub ctx features are also accepted here.\")\n parser.add_argument(\"--t_feat_dir\", type=str, help=\"text/query feature dir\")\n parser.add_argument(\"--a_feat_dir\", type=str, help=\"audio feature dir\")\n parser.add_argument(\"--v_feat_dim\", type=int, help=\"video feature dim\")\n parser.add_argument(\"--t_feat_dim\", type=int, help=\"text/query feature dim\")\n parser.add_argument(\"--a_feat_dim\", type=int, help=\"audio feature dim\")\n parser.add_argument(\"--ctx_mode\", type=str, default=\"video_tef\")\n\n # Model config\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n # * Transformer\n parser.add_argument('--enc_layers', default=3, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=3, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--t2v_layers', default=2, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--sent_layers', default=1, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--moment_layers', default=1, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dummy_layers', default=2, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=1024, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--input_dropout', default=0.5, type=float,\n help=\"Dropout applied in input\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument(\"--txt_drop_ratio\", default=0, type=float,\n help=\"drop txt_drop_ratio tokens from text input. 0.1=10%\")\n parser.add_argument(\"--use_txt_pos\", action=\"store_true\", help=\"use position_embedding for text as well.\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=10, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--num_dummies', default=45, type=int,\n help=\"Number of dummy tokens\")\n parser.add_argument('--total_prompts', default=10, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--num_prompts', default=1, type=int,\n help=\"Number of dummy tokens\")\n parser.add_argument('--pre_norm', action='store_true')\n # other model configs\n parser.add_argument(\"--n_input_proj\", type=int, default=2, help=\"#layers to encoder input\")\n parser.add_argument(\"--contrastive_hdim\", type=int, default=64, help=\"dim for contrastive embeddings\")\n parser.add_argument(\"--temperature\", type=float, default=0.07, help=\"temperature nce contrastive_align_loss\")\n # Loss\n\n parser.add_argument(\"--saliency_margin\", type=float, default=0.2)\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n parser.add_argument(\"--span_loss_type\", default=\"l1\", type=str, choices=['l1', 'ce'],\n help=\"l1: (center-x, width) regression. 
ce: (st_idx, ed_idx) classification.\")\n parser.add_argument(\"--contrastive_align_loss\", action=\"store_true\",\n help=\"Disable contrastive_align_loss between matched query spans and the text.\")\n # * Matcher\n parser.add_argument('--set_cost_span', default=10, type=float,\n help=\"L1 span coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=1, type=float,\n help=\"giou span coefficient in the matching cost\")\n parser.add_argument('--set_cost_class', default=4, type=float,\n help=\"Class coefficient in the matching cost\")\n\n # * Loss coefficients\n parser.add_argument(\"--lw_saliency\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_wattn\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_ms_align\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_distill\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument('--span_loss_coef', default=10, type=float)\n parser.add_argument('--giou_loss_coef', default=1, type=float)\n parser.add_argument('--label_loss_coef', default=4, type=float)\n parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n parser.add_argument(\"--contrastive_align_loss_coef\", default=0.0, type=float)\n\n parser.add_argument(\"--no_sort_results\", action=\"store_true\",\n help=\"do not sort results, use this for moment query visualization\")\n parser.add_argument(\"--max_before_nms\", type=int, default=10)\n parser.add_argument(\"--max_after_nms\", type=int, default=10)\n parser.add_argument(\"--conf_thd\", type=float, default=0.0, help=\"only keep windows with conf >= conf_thd\")\n parser.add_argument(\"--nms_thd\", type=float, default=-1,\n help=\"additionally use non-maximum suppression \"\n \"(or non-minimum suppression for distance)\"\n \"to post-processing the predictions. \"\n \"-1: do not use nms. 
[0, 1]\")\n self.parser = parser\n\n def display_save(self, opt):\n args = vars(opt)\n # Display settings\n print(dict_to_markdown(vars(opt), max_str_len=120))\n # Save settings\n if not isinstance(self, TestOptions):\n option_file_path = os.path.join(opt.results_dir, self.saved_option_filename) # not yaml file indeed\n save_json(args, option_file_path, save_pretty=True)\n\n def parse(self, a_feat_dir=None):\n if not self.initialized:\n self.initialize()\n opt = self.parser.parse_args()\n\n if opt.debug:\n opt.results_root = os.path.sep.join(opt.results_root.split(os.path.sep)[:-1] + [\"debug_results\", ])\n opt.num_workers = 0\n\n if isinstance(self, TestOptions):\n # modify model_dir to absolute path\n # opt.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"results\", opt.model_dir)\n opt.model_dir = os.path.dirname(opt.resume)\n if a_feat_dir is not None:\n opt.a_feat_dir = a_feat_dir\n saved_options = load_json(os.path.join(opt.model_dir, self.saved_option_filename))\n for arg in saved_options: # use saved options to overwrite all BaseOptions args.\n if arg not in [\"results_root\", \"num_workers\", \"nms_thd\", \"debug\", # \"max_before_nms\", \"max_after_nms\"\n \"max_pred_l\", \"min_pred_l\",\n \"resume\", \"resume_all\", \"no_sort_results\"]:\n setattr(opt, arg, saved_options[arg])\n # opt.no_core_driver = True\n if opt.eval_results_dir is not None:\n opt.results_dir = opt.eval_results_dir\n else:\n if opt.exp_id is None:\n raise ValueError(\"--exp_id is required for at a training option!\")\n\n ctx_str = opt.ctx_mode + \"_sub\" if any([\"sub_ctx\" in p for p in opt.v_feat_dirs]) else opt.ctx_mode\n opt.results_dir = os.path.join(opt.results_root,\n \"-\".join([opt.dset_name, ctx_str, opt.exp_id,\n str(opt.enc_layers) + str(opt.dec_layers) + str(opt.t2v_layers) + str(opt.moment_layers) + str(opt.dummy_layers) + str(opt.sent_layers),\n 'ndum_' + str(opt.num_dummies), 'nprom_' + str(opt.num_prompts) + '_' + str(opt.total_prompts)]))\n mkdirp(opt.results_dir)\n save_fns = ['cg_detr/model.py', 'cg_detr/transformer.py']\n for save_fn in save_fns:\n shutil.copyfile(save_fn, os.path.join(opt.results_dir, os.path.basename(save_fn)))\n\n # save a copy of current code\n code_dir = os.path.dirname(os.path.realpath(__file__))\n code_zip_filename = os.path.join(opt.results_dir, \"code.zip\")\n make_zipfile(code_dir, code_zip_filename,\n enclosing_dir=\"code\",\n exclude_dirs_substring=\"results\",\n exclude_dirs=[\"results\", \"debug_results\", \"__pycache__\"],\n exclude_extensions=[\".pyc\", \".ipynb\", \".swap\"], )\n\n self.display_save(opt)\n\n opt.ckpt_filepath = os.path.join(opt.results_dir, self.ckpt_filename)\n opt.train_log_filepath = os.path.join(opt.results_dir, self.train_log_filename)\n opt.eval_log_filepath = os.path.join(opt.results_dir, self.eval_log_filename)\n opt.tensorboard_log_dir = os.path.join(opt.results_dir, self.tensorboard_log_dir)\n opt.device = torch.device(\"cuda\" if opt.device >= 0 else \"cpu\")\n opt.pin_memory = not opt.no_pin_memory\n\n opt.use_tef = \"tef\" in opt.ctx_mode\n opt.use_video = \"video\" in opt.ctx_mode\n if not opt.use_video:\n opt.v_feat_dim = 0\n if opt.use_tef:\n opt.v_feat_dim += 2\n\n self.opt = opt\n return opt" }, { "identifier": "StartEndDataset", "path": "cg_detr/start_end_dataset.py", "snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to 
inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if 
self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n 
\"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n 
neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)" }, { "identifier": "start_end_collate", "path": "cg_detr/start_end_dataset.py", "snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data" }, { "identifier": "prepare_batch_inputs", "path": "cg_detr/start_end_dataset.py", "snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets" }, { "identifier": "eval_epoch", "path": 
"cg_detr/inference.py", "snippet": "def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None):\n logger.info(\"Generate submissions\")\n model.eval()\n if criterion is not None and eval_dataset.load_labels:\n criterion.eval()\n else:\n criterion = None\n\n if opt.dset_name == 'tacos':\n shuffle = True\n else:\n shuffle = False\n\n eval_loader = DataLoader(\n eval_dataset,\n collate_fn=start_end_collate,\n batch_size=opt.eval_bsz,\n num_workers=opt.num_workers,\n shuffle=shuffle,\n pin_memory=opt.pin_memory\n )\n\n\n # tvsum \n if opt.dset_name in ['tvsum', 'youtube_uni']:\n metrics, eval_loss_meters = compute_hl_results(model, eval_loader, opt, epoch_i, criterion, tb_writer)\n \n # to match original save format\n submission = [\n {\"brief\": metrics}\n ]\n submission_path = os.path.join(opt.results_dir, \"latest_metric.jsonl\")\n save_jsonl(submission, submission_path)\n\n return submission[0], submission[0], eval_loss_meters, [submission_path]\n\n else:\n submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer)\n\n if opt.dset_name in ['charadesSTA', 'tacos', 'nlq']:\n new_submission = []\n for s in submission:\n s.pop('pred_saliency_scores', None)\n new_submission.append(s)\n submission = new_submission\n\n if opt.no_sort_results:\n save_submission_filename = save_submission_filename.replace(\".jsonl\", \"_unsorted.jsonl\")\n metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing(\n submission, opt, eval_dataset.data, save_submission_filename)\n return metrics, metrics_nms, eval_loss_meters, latest_file_paths" }, { "identifier": "start_inference", "path": "cg_detr/inference.py", "snippet": "def start_inference(train_opt=None, split=None, splitfile=None):\n if train_opt is not None:\n opt = TestOptions().parse(train_opt.a_feat_dir)\n else:\n opt = TestOptions().parse()\n if split is not None:\n opt.eval_split_name = split\n if splitfile is not None:\n opt.eval_path = splitfile\n\n print(opt.eval_split_name)\n print(opt.eval_path)\n logger.info(\"Setup config, data and model...\")\n\n\n cudnn.benchmark = True\n cudnn.deterministic = False\n\n assert opt.eval_path is not None\n if opt.eval_split_name == 'val':\n loadlabel = True\n else:\n loadlabel = False\n\n eval_dataset = StartEndDataset(\n dset_name=opt.dset_name,\n data_path=opt.eval_path,\n v_feat_dirs=opt.v_feat_dirs,\n q_feat_dir=opt.t_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=opt.max_q_l,\n max_v_l=opt.max_v_l,\n ctx_mode=opt.ctx_mode,\n data_ratio=opt.data_ratio,\n normalize_v=not opt.no_norm_vfeat,\n normalize_t=not opt.no_norm_tfeat,\n clip_len=opt.clip_length,\n max_windows=opt.max_windows,\n load_labels=loadlabel, # opt.eval_split_name == \"val\",\n span_loss_type=opt.span_loss_type,\n txt_drop_ratio=0,\n dset_domain=opt.dset_domain,\n )\n\n\n\n model, criterion, _, _ = setup_model(opt)\n\n save_submission_filename = \"hl_{}_submission.jsonl\".format(\n opt.eval_split_name)\n # save_submission_filename = \"inference_{}_{}_{}_preds.jsonl\".format(\n # opt.dset_name, opt.eval_split_name, opt.eval_id)\n logger.info(\"Starting inference...\")\n with torch.no_grad():\n metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \\\n eval_epoch(model, eval_dataset, opt, save_submission_filename, criterion=criterion)\n if opt.eval_split_name == 'val':\n logger.info(\"metrics_no_nms {}\".format(pprint.pformat(metrics_no_nms[\"brief\"], indent=4)))\n if metrics_nms is not None:\n 
logger.info(\"metrics_nms {}\".format(pprint.pformat(metrics_nms[\"brief\"], indent=4)))" }, { "identifier": "setup_model", "path": "cg_detr/inference.py", "snippet": "def setup_model(opt):\n \"\"\"setup model/optimizer/scheduler and load checkpoints when needed\"\"\"\n logger.info(\"setup model/optimizer/scheduler\")\n model, criterion = build_model(opt)\n if opt.device.type == \"cuda\":\n logger.info(\"CUDA enabled.\")\n model.to(opt.device)\n criterion.to(opt.device)\n\n param_dicts = [{\"params\": [p for n, p in model.named_parameters() if p.requires_grad]}]\n optimizer = torch.optim.AdamW(param_dicts, lr=opt.lr, weight_decay=opt.wd)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_drop)\n\n if opt.resume is not None:\n logger.info(f\"Load checkpoint from {opt.resume}\")\n checkpoint = torch.load(opt.resume, map_location=\"cpu\")\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n if 'pt' in opt.resume[:-4]:\n if 'asr' in opt.resume[:25]:\n model.load_state_dict(checkpoint[\"model\"])\n else:\n for k, v in checkpoint[\"model\"].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n # model.load_state_dict(checkpoint[\"model\"])\n model.load_state_dict(new_state_dict)\n else:\n model.load_state_dict(checkpoint[\"model\"])\n if opt.resume_all:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n opt.start_epoch = checkpoint['epoch'] + 1\n logger.info(f\"Loaded model saved at epoch {checkpoint['epoch']} from checkpoint: {opt.resume}\")\n else:\n logger.warning(\"If you intend to evaluate the model, please specify --resume with ckpt path\")\n\n return model, criterion, optimizer, lr_scheduler" }, { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "dict_to_markdown", "path": "utils/basic_utils.py", "snippet": "def dict_to_markdown(d, max_str_len=120):\n # convert list into its str representation\n d = {k: v.__repr__() if isinstance(v, list) else v for k, v in d.items()}\n # truncate string that is longer than max_str_len\n if max_str_len is not None:\n d = {k: v[-max_str_len:] if isinstance(v, str) else v for k, v in d.items()}\n return pd.DataFrame(d, index=[0]).transpose().to_markdown()" }, { "identifier": "count_parameters", "path": "utils/model_utils.py", "snippet": "def count_parameters(model, verbose=True):\n \"\"\"Count number of parameters in PyTorch model,\n References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.\n\n from utils.utils import count_parameters\n count_parameters(model)\n import sys\n sys.exit(1)\n \"\"\"\n n_all = sum(p.numel() for p in model.parameters())\n n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n if verbose:\n print(\"Parameter Count: all {:,d}; trainable {:,d}\".format(n_all, n_trainable))\n return n_all, n_trainable" } ]
import os import time import json import pprint import random import numpy as np import torch import torch.nn as nn import torch.backends.cudnn as cudnn import logging import sys from tqdm import tqdm, trange from collections import defaultdict from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from cg_detr.config import BaseOptions from cg_detr.start_end_dataset import \ StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.inference import eval_epoch, start_inference, setup_model from utils.basic_utils import AverageMeter, dict_to_markdown from utils.model_utils import count_parameters
14837
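A note on the token_num field above: per the schema at the top of this dump it is an integer token count for the record's code. A minimal sketch of recomputing such a count, assuming a GPT-2-style BPE tokenizer from the transformers library (the tokenizer actually used to produce this field is not stated anywhere in the dump):

from transformers import AutoTokenizer

# Hypothetical helper: count BPE tokens in a code string.
# Assumption: "gpt2" stands in for whatever tokenizer produced token_num.
def count_tokens(text: str) -> int:
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    return len(tokenizer.encode(text))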
if opt.debug: break tb_writer.close() def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def 
start_training(): logger.info("Setup config, data and model...")
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def set_seed(seed, use_cuda=True): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if use_cuda: torch.cuda.manual_seed_all(seed) def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer): logger.info(f"[Epoch {epoch_i+1}]") model.train() criterion.train() # init meters time_meters = defaultdict(AverageMeter) loss_meters = defaultdict(AverageMeter) num_training_examples = len(train_loader) timer_dataloading = time.time() for batch_idx, batch in tqdm(enumerate(train_loader), desc="Training Iteration", total=num_training_examples): time_meters["dataloading_time"].update(time.time() - timer_dataloading) timer_start = time.time() model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory) time_meters["prepare_inputs_time"].update(time.time() - timer_start) timer_start = time.time() outputs = model(**model_inputs, targets=targets) loss_dict = criterion(outputs, targets) weight_dict = criterion.weight_dict losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) time_meters["model_forward_time"].update(time.time() - timer_start) timer_start = time.time() optimizer.zero_grad() losses.backward() if opt.grad_clip > 0: nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) optimizer.step() time_meters["model_backward_time"].update(time.time() - timer_start) loss_dict["loss_overall"] = float(losses) # for logging only for k, v in loss_dict.items(): loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) timer_dataloading = time.time() if opt.debug and batch_idx == 3: break # print/add logs tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1) for k, v in loss_meters.items(): tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1) to_write = opt.train_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i+1, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()])) with open(opt.train_log_filepath, "a") as f: f.write(to_write) logger.info("Epoch time stats:") for name, meter in time_meters.items(): d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]} logger.info(f"{name} ==> {d}") def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. 
es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = opt.eval_epoch if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) if opt.dset_name in ['hl']: stop_score = metrics["brief"]["MR-full-mAP"] else: stop_score = (metrics["brief"]["[email protected]"] + metrics["brief"]["[email protected]"]) / 2 if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) # save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain # if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies # checkpoint = { # "model": model.state_dict(), # "optimizer": optimizer.state_dict(), # "epoch": epoch_i, # "opt": opt # } # torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, 
num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def start_training(): logger.info("Setup config, data and model...")
opt = BaseOptions().parse()
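The next_line value just above ("opt = BaseOptions().parse()") is the completion target that directly follows cropped_code. A minimal sketch of scoring a model prediction against it, assuming exact match on the whitespace-stripped line (a common protocol for next-line completion benchmarks; the dataset's official metric is not given in this dump):

# Hypothetical scorer: exact match on whitespace-stripped lines.
def exact_match(prediction: str, gold_next_line: str) -> bool:
    return prediction.strip() == gold_next_line.strip()

# Example usage: exact_match(model_output, record["next_line"])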
0
2023-11-10 12:45:25+00:00
24k
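This closes the first record (gold_snippet_index 0, created_at 2023-11-10, level 24k); the next record, for ej0cl6/TextEE, begins below. A minimal sketch of iterating records like these and resolving the gold context snippet, assuming the dump is stored as JSON lines under the schema listed at the top of this document; the file name samples.jsonl is hypothetical:

import json

def iter_records(path: str):
    # Assumption: one JSON object per line, matching the schema shown above.
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

for record in iter_records("samples.jsonl"):
    # gold_snippet_index points into the record's context list.
    gold = record["context"][record["gold_snippet_index"]]
    print(record["repo_name"], record["file_path"], gold["identifier"])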
ej0cl6/TextEE
TextEE/models/OneIE/E2Etrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass" }, { "identifier": "OneIEE2EModel", "path": "TextEE/models/OneIE/E2Emodel.py", "snippet": "class OneIEE2EModel(nn.Module):\n def __init__(self,\n config,\n vocabs,\n valid_patterns=None):\n super().__init__()\n\n # vocabularies\n self.vocabs = vocabs\n self.entity_label_stoi = vocabs['entity_label']\n self.trigger_label_stoi = vocabs['trigger_label']\n self.mention_type_stoi = vocabs['mention_type']\n self.entity_type_stoi = vocabs['entity_type']\n self.event_type_stoi = vocabs['event_type']\n self.relation_type_stoi = vocabs['relation_type']\n self.role_type_stoi = vocabs['role_type']\n self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}\n self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}\n self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}\n self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}\n self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}\n self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}\n self.entity_label_num = len(self.entity_label_stoi)\n self.trigger_label_num = len(self.trigger_label_stoi)\n self.mention_type_num = len(self.mention_type_stoi)\n self.entity_type_num = len(self.entity_type_stoi)\n self.event_type_num = len(self.event_type_stoi)\n self.relation_type_num = len(self.relation_type_stoi)\n self.role_type_num = len(self.role_type_stoi)\n self.valid_relation_entity = None\n self.valid_event_role = None\n self.valid_role_entity = None\n if valid_patterns:\n self.valid_event_role = valid_patterns['event_role']\n self.valid_relation_entity = valid_patterns['relation_entity']\n self.valid_role_entity = valid_patterns['role_entity']\n self.relation_directional = config.relation_directional\n self.symmetric_relations = config.symmetric_relations\n self.symmetric_relation_idxs = {self.relation_type_stoi[r]\n for r in self.symmetric_relations}\n\n # BERT encoder\n self.pretrained_model_name = config.pretrained_model_name\n self.cache_dir = config.cache_dir\n if self.pretrained_model_name.startswith('bert-'):\n self.bert = BertModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir)\n elif self.pretrained_model_name.startswith('roberta-'):\n self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir)\n elif self.pretrained_model_name.startswith('xlm-'):\n self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir) \n else:\n raise ValueError\n self.bert_dim = self.bert_config.hidden_size\n self.extra_bert = config.extra_bert\n 
self.use_extra_bert = config.use_extra_bert\n if self.use_extra_bert:\n self.bert_dim *= 2\n self.bert_dropout = nn.Dropout(p=config.bert_dropout)\n self.multi_piece = config.multi_piece_strategy\n # local classifiers\n self.use_entity_type = config.use_entity_type\n self.binary_dim = self.bert_dim * 2\n linear_bias = config.linear_bias\n linear_dropout = config.linear_dropout\n entity_hidden_num = config.entity_hidden_num\n mention_hidden_num = config.mention_hidden_num\n event_hidden_num = config.event_hidden_num\n relation_hidden_num = config.relation_hidden_num\n role_hidden_num = config.role_hidden_num\n role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)\n self.entity_label_ffn = nn.Linear(self.bert_dim, self.entity_label_num,\n bias=linear_bias)\n self.trigger_label_ffn = nn.Linear(self.bert_dim, self.trigger_label_num,\n bias=linear_bias)\n self.entity_type_ffn = Linears([self.bert_dim, entity_hidden_num,\n self.entity_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.mention_type_ffn = Linears([self.bert_dim, mention_hidden_num,\n self.mention_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.event_type_ffn = Linears([self.bert_dim, event_hidden_num,\n self.event_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.relation_type_ffn = Linears([self.binary_dim, relation_hidden_num,\n self.relation_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.role_type_ffn = Linears([role_input_dim, role_hidden_num,\n self.role_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n # global features\n self.use_global_features = config.use_global_features\n self.global_features = config.global_features\n self.global_feature_maps = generate_global_feature_maps(vocabs, valid_patterns)\n self.global_feature_num = sum(len(m) for k, m in self.global_feature_maps.items()\n if k in self.global_features or\n not self.global_features)\n self.global_feature_weights = nn.Parameter(\n torch.zeros(self.global_feature_num).fill_(-0.0001))\n # decoder\n self.beam_size = config.beam_size\n self.beta_v = config.beta_v\n self.beta_e = config.beta_e\n # loss functions\n self.entity_criteria = torch.nn.CrossEntropyLoss()\n self.event_criteria = torch.nn.CrossEntropyLoss()\n self.mention_criteria = torch.nn.CrossEntropyLoss()\n self.relation_criteria = torch.nn.CrossEntropyLoss()\n self.role_criteria = torch.nn.CrossEntropyLoss()\n # others\n self.entity_crf = CRF(self.entity_label_stoi, bioes=False)\n self.trigger_crf = CRF(self.trigger_label_stoi, bioes=False)\n self.pad_vector = nn.Parameter(torch.randn(1, 1, self.bert_dim))\n\n def encode(self, piece_idxs, attention_masks, token_lens):\n \"\"\"Encode input sequences with BERT\n :param piece_idxs (LongTensor): word pieces indices\n :param attention_masks (FloatTensor): attention mask\n :param token_lens (list): token lengths\n \"\"\"\n batch_size, _ = piece_idxs.size()\n all_bert_outputs = self.bert(piece_idxs, attention_mask=attention_masks)\n bert_outputs = all_bert_outputs[0]\n\n if self.use_extra_bert:\n extra_bert_outputs = all_bert_outputs[2][self.extra_bert]\n bert_outputs = torch.cat([bert_outputs, extra_bert_outputs], dim=2)\n\n if self.multi_piece == 'first':\n # select the first piece for multi-piece words\n offsets = 
token_lens_to_offsets(token_lens)\n offsets = piece_idxs.new(offsets)\n # + 1 because the first vector is for [CLS]\n offsets = offsets.unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n bert_outputs = torch.gather(bert_outputs, 1, offsets)\n elif self.multi_piece == 'average':\n # average all pieces for multi-piece words\n idxs, masks, token_num, token_len = token_lens_to_idxs(token_lens)\n idxs = piece_idxs.new(idxs).unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n masks = bert_outputs.new(masks).unsqueeze(-1)\n bert_outputs = torch.gather(bert_outputs, 1, idxs) * masks\n bert_outputs = bert_outputs.view(batch_size, token_num, token_len, self.bert_dim)\n bert_outputs = bert_outputs.sum(2)\n else:\n raise ValueError('Unknown multi-piece token handling strategy: {}'\n .format(self.multi_piece))\n bert_outputs = self.bert_dropout(bert_outputs)\n return bert_outputs\n\n def scores(self, bert_outputs, graphs, entity_types_onehot=None,\n predict=False, gold_tri=False, gold_ent=False):\n (\n entity_idxs, entity_masks, entity_num, entity_len,\n trigger_idxs, trigger_masks, trigger_num, trigger_len,\n ) = graphs_to_node_idxs(graphs)\n\n batch_size, _, bert_dim = bert_outputs.size()\n\n entity_idxs = bert_outputs.new_tensor(entity_idxs, dtype=torch.long)\n trigger_idxs = bert_outputs.new_tensor(trigger_idxs, dtype=torch.long)\n entity_masks = bert_outputs.new_tensor(entity_masks)\n trigger_masks = bert_outputs.new_tensor(trigger_masks)\n\n # entity type scores\n entity_idxs = entity_idxs.unsqueeze(-1).expand(-1, -1, bert_dim)\n entity_masks = entity_masks.unsqueeze(-1).expand(-1, -1, bert_dim)\n entity_words = torch.gather(bert_outputs, 1, entity_idxs)\n entity_words = entity_words * entity_masks\n entity_words = entity_words.view(batch_size, entity_num, entity_len, bert_dim)\n entity_reprs = entity_words.sum(2)\n entity_type_scores = self.entity_type_ffn(entity_reprs)\n\n # mention type scores\n mention_type_scores = self.mention_type_ffn(entity_reprs)\n\n # trigger type scores\n trigger_idxs = trigger_idxs.unsqueeze(-1).expand(-1, -1, bert_dim)\n trigger_masks = trigger_masks.unsqueeze(-1).expand(-1, -1, bert_dim)\n trigger_words = torch.gather(bert_outputs, 1, trigger_idxs)\n trigger_words = trigger_words * trigger_masks\n trigger_words = trigger_words.view(batch_size, trigger_num, trigger_len, bert_dim)\n trigger_reprs = trigger_words.sum(2)\n event_type_scores = self.event_type_ffn(trigger_reprs)\n \n # Add for gold entity given case:\n # The idea is to make the gold entities' score become very high\n if gold_ent:\n for graph, entity_type_score in zip(graphs, entity_type_scores):\n for ent, score in zip(graph.entities, entity_type_score):\n score[ent[2]] = 10000 \n # Add for gold trigger given case:\n # The idea is to make the gold triggers' score become very high\n if gold_tri:\n for graph, event_type_score in zip(graphs, event_type_scores):\n for trig, score in zip(graph.triggers, event_type_score):\n score[trig[2]] = 10000\n\n # relation type score\n ee_idxs = generate_pairwise_idxs(entity_num, entity_num)\n ee_idxs = entity_idxs.new(ee_idxs)\n ee_idxs = ee_idxs.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, bert_dim)\n ee_reprs = torch.cat([entity_reprs, entity_reprs], dim=1)\n ee_reprs = torch.gather(ee_reprs, 1, ee_idxs)\n ee_reprs = ee_reprs.view(batch_size, -1, 2 * bert_dim)\n relation_type_scores = self.relation_type_ffn(ee_reprs)\n\n # role type score\n te_idxs = generate_pairwise_idxs(trigger_num, entity_num)\n te_idxs = entity_idxs.new(te_idxs)\n te_idxs = 
te_idxs.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, bert_dim)\n te_reprs = torch.cat([trigger_reprs, entity_reprs], dim=1)\n te_reprs = torch.gather(te_reprs, 1, te_idxs)\n te_reprs = te_reprs.view(batch_size, -1, 2 * bert_dim)\n\n if self.use_entity_type:\n if predict:\n entity_type_scores_softmax = entity_type_scores.softmax(dim=2)\n entity_type_scores_softmax = entity_type_scores_softmax.repeat(1, trigger_num, 1)\n te_reprs = torch.cat([te_reprs, entity_type_scores_softmax], dim=2)\n else:\n entity_types_onehot = entity_types_onehot.repeat(1, trigger_num, 1)\n te_reprs = torch.cat([te_reprs, entity_types_onehot], dim=2)\n role_type_scores = self.role_type_ffn(te_reprs)\n\n return (entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores)\n\n def forward(self, batch):\n # encoding\n bert_outputs = self.encode(batch.piece_idxs,\n batch.attention_masks,\n batch.token_lens)\n batch_size, _, _ = bert_outputs.size()\n # entity type indices -> one hot\n entity_types = batch.entity_type_idxs.view(batch_size, -1)\n entity_types = torch.clamp(entity_types, min=0)\n entity_types_onehot = bert_outputs.new_zeros(*entity_types.size(),\n self.entity_type_num)\n entity_types_onehot.scatter_(2, entity_types.unsqueeze(-1), 1)\n # identification\n entity_label_scores = self.entity_label_ffn(bert_outputs)\n trigger_label_scores = self.trigger_label_ffn(bert_outputs)\n\n entity_label_scores = self.entity_crf.pad_logits(entity_label_scores)\n entity_label_loglik = self.entity_crf.loglik(entity_label_scores,\n batch.entity_label_idxs,\n batch.token_nums)\n trigger_label_scores = self.trigger_crf.pad_logits(trigger_label_scores)\n trigger_label_loglik = self.trigger_crf.loglik(trigger_label_scores,\n batch.trigger_label_idxs,\n batch.token_nums)\n # classification\n scores = self.scores(bert_outputs, batch.graphs, entity_types_onehot)\n (\n entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores\n ) = scores\n entity_type_scores = entity_type_scores.view(-1, self.entity_type_num)\n event_type_scores = event_type_scores.view(-1, self.event_type_num)\n relation_type_scores = relation_type_scores.view(-1, self.relation_type_num)\n role_type_scores = role_type_scores.view(-1, self.role_type_num)\n mention_type_scores = mention_type_scores.view(-1, self.mention_type_num)\n classification_loss = self.entity_criteria(entity_type_scores,\n batch.entity_type_idxs) + \\\n self.event_criteria(event_type_scores,\n batch.event_type_idxs) + \\\n self.role_criteria(role_type_scores,\n batch.role_type_idxs) + \\\n self.relation_criteria(relation_type_scores,\n batch.relation_type_idxs) + \\\n self.mention_criteria(mention_type_scores,\n batch.mention_type_idxs)\n\n loss = classification_loss - entity_label_loglik.mean() - trigger_label_loglik.mean()\n\n # global features\n if self.use_global_features:\n gold_scores = self.compute_graph_scores(batch.graphs, scores)\n top_graphs = self.generate_locally_top_graphs(batch.graphs, scores)\n top_scores = self.compute_graph_scores(top_graphs, scores)\n global_loss = (top_scores - gold_scores).clamp(min=0)\n loss = loss + global_loss.mean()\n return loss\n\n def predict(self, batch, gold_tri=False, gold_ent=False):\n self.eval()\n with torch.no_grad():\n bert_outputs = self.encode(batch.piece_idxs,\n batch.attention_masks,\n batch.token_lens)\n batch_size, _, _ = bert_outputs.size()\n\n # identification\n entity_label_scores = self.entity_label_ffn(bert_outputs)\n entity_label_scores = 
self.entity_crf.pad_logits(entity_label_scores)\n trigger_label_scores = self.trigger_label_ffn(bert_outputs)\n trigger_label_scores = self.trigger_crf.pad_logits(trigger_label_scores)\n _, entity_label_preds = self.entity_crf.viterbi_decode(entity_label_scores,\n batch.token_nums)\n _, trigger_label_preds = self.trigger_crf.viterbi_decode(trigger_label_scores,\n batch.token_nums)\n entities = tag_paths_to_spans(entity_label_preds,\n batch.token_nums,\n self.entity_label_stoi)\n triggers = tag_paths_to_spans(trigger_label_preds,\n batch.token_nums,\n self.trigger_label_stoi)\n \n # Add for gold trigger/ gold entity given case.\n if gold_tri:\n triggers = [[list(trigger) for trigger in graph.triggers] for graph in batch.graphs]\n if gold_ent:\n entities = [[list(entity) for entity in graph.entities] for graph in batch.graphs]\n\n node_graphs = [Graph(e, t, [], [], self.vocabs)\n for e, t in zip(entities, triggers)]\n scores = self.scores(bert_outputs, node_graphs, predict=True, gold_tri=gold_tri, gold_ent=gold_ent)\n max_entity_num = max(max(len(seq_entities) for seq_entities in entities), 1)\n\n batch_graphs = []\n # Decode each sentence in the batch\n for i in range(batch_size):\n seq_entities, seq_triggers = entities[i], triggers[i]\n spans = sorted([(*i, True) for i in seq_entities] +\n [(*i, False) for i in seq_triggers],\n key=lambda x: (x[0], x[1], not x[-1]))\n entity_num, trigger_num = len(seq_entities), len(seq_triggers)\n if entity_num == 0 and trigger_num == 0:\n # skip decoding\n batch_graphs.append(Graph.empty_graph(self.vocabs))\n continue\n graph = self.decode(spans,\n entity_type_scores=scores[0][i],\n mention_type_scores=scores[1][i],\n event_type_scores=scores[2][i],\n relation_type_scores=scores[3][i],\n role_type_scores=scores[4][i],\n entity_num=max_entity_num)\n batch_graphs.append(graph)\n\n self.train()\n return batch_graphs\n\n def compute_graph_scores(self, graphs, scores):\n (\n entity_type_scores, _mention_type_scores,\n trigger_type_scores, relation_type_scores,\n role_type_scores\n ) = scores\n label_idxs = graphs_to_label_idxs(graphs)\n label_idxs = [entity_type_scores.new_tensor(idx,\n dtype=torch.long if i % 2 == 0\n else torch.float)\n for i, idx in enumerate(label_idxs)]\n (\n entity_idxs, entity_mask, trigger_idxs, trigger_mask,\n relation_idxs, relation_mask, role_idxs, role_mask\n ) = label_idxs\n # Entity score\n entity_idxs = entity_idxs.unsqueeze(-1)\n entity_scores = torch.gather(entity_type_scores, 2, entity_idxs)\n entity_scores = entity_scores.squeeze(-1) * entity_mask\n entity_score = entity_scores.sum(1)\n # Trigger score\n trigger_idxs = trigger_idxs.unsqueeze(-1)\n trigger_scores = torch.gather(trigger_type_scores, 2, trigger_idxs)\n trigger_scores = trigger_scores.squeeze(-1) * trigger_mask\n trigger_score = trigger_scores.sum(1)\n # Relation score\n relation_idxs = relation_idxs.unsqueeze(-1)\n relation_scores = torch.gather(relation_type_scores, 2, relation_idxs)\n relation_scores = relation_scores.squeeze(-1) * relation_mask\n relation_score = relation_scores.sum(1)\n # Role score\n role_idxs = role_idxs.unsqueeze(-1)\n role_scores = torch.gather(role_type_scores, 2, role_idxs)\n role_scores = role_scores.squeeze(-1) * role_mask\n role_score = role_scores.sum(1)\n\n score = entity_score + trigger_score + role_score + relation_score\n\n global_vectors = [generate_global_feature_vector(g, self.global_feature_maps, features=self.global_features)\n for g in graphs]\n global_vectors = entity_scores.new_tensor(global_vectors)\n global_weights 
= self.global_feature_weights.unsqueeze(0).expand_as(global_vectors)\n global_score = (global_vectors * global_weights).sum(1)\n score = score + global_score\n\n return score\n\n def generate_locally_top_graphs(self, graphs, scores):\n (\n entity_type_scores, _mention_type_scores,\n trigger_type_scores, relation_type_scores,\n role_type_scores\n ) = scores\n max_entity_num = max(max([g.entity_num for g in graphs]), 1)\n top_graphs = []\n for graph_idx, graph in enumerate(graphs):\n entity_num = graph.entity_num\n trigger_num = graph.trigger_num\n _, top_entities = entity_type_scores[graph_idx].max(1)\n top_entities = top_entities.tolist()[:entity_num]\n top_entities = [(i, j, k) for (i, j, _), k in\n zip(graph.entities, top_entities)]\n _, top_triggers = trigger_type_scores[graph_idx].max(1)\n top_triggers = top_triggers.tolist()[:trigger_num]\n top_triggers = [(i, j, k) for (i, j, _), k in\n zip(graph.triggers, top_triggers)]\n \n top_relation_scores, top_relation_labels = relation_type_scores[graph_idx].max(1)\n top_relation_scores = top_relation_scores.tolist()\n top_relation_labels = top_relation_labels.tolist()\n top_relations = [(i, j) for i, j in zip(top_relation_scores, top_relation_labels)]\n top_relation_list = []\n for i in range(entity_num):\n for j in range(entity_num):\n if i < j:\n score_1, label_1 = top_relations[i * max_entity_num + j]\n score_2, label_2 = top_relations[j * max_entity_num + i]\n if score_1 > score_2 and label_1 != 0:\n top_relation_list.append((i, j, label_1))\n if score_2 > score_1 and label_2 != 0: \n top_relation_list.append((j, i, label_2))\n\n _, top_roles = role_type_scores[graph_idx].max(1)\n top_roles = top_roles.tolist()\n top_roles = [(i, j, top_roles[i * max_entity_num + j])\n for i in range(trigger_num) for j in range(entity_num)\n if top_roles[i * max_entity_num + j] != 0]\n top_graphs.append(Graph(\n entities=top_entities,\n triggers=top_triggers,\n # relations=top_relations,\n relations=top_relation_list,\n roles=top_roles,\n vocabs=graph.vocabs\n ))\n return top_graphs\n\n def trim_beam_set(self, beam_set, beam_size):\n if len(beam_set) > beam_size:\n beam_set.sort(key=lambda x: self.compute_graph_score(x), reverse=True)\n beam_set = beam_set[:beam_size]\n return beam_set\n\n def compute_graph_score(self, graph):\n score = graph.graph_local_score\n if self.use_global_features:\n global_vector = generate_global_feature_vector(graph,\n self.global_feature_maps,\n features=self.global_features)\n global_vector = self.global_feature_weights.new_tensor(global_vector)\n global_score = global_vector.dot(self.global_feature_weights).item()\n score = score + global_score\n return score\n\n def decode(self,\n spans,\n entity_type_scores,\n mention_type_scores,\n event_type_scores,\n relation_type_scores,\n role_type_scores,\n entity_num):\n beam_set = [Graph.empty_graph(self.vocabs)]\n entity_idx, trigger_idx = 0, 0\n\n for start, end, _, is_entity_node in spans:\n # 1. 
node step\n if is_entity_node:\n node_scores = entity_type_scores[entity_idx].tolist()\n else:\n node_scores = event_type_scores[trigger_idx].tolist()\n node_scores_norm = normalize_score(node_scores)\n node_scores = [(s, i, n) for i, (s, n) in enumerate(zip(node_scores,\n node_scores_norm))]\n node_scores.sort(key=lambda x: x[0], reverse=True)\n top_node_scores = node_scores[:self.beta_v]\n\n beam_set_ = []\n for graph in beam_set:\n for score, label, score_norm in top_node_scores:\n graph_ = graph.copy()\n if is_entity_node:\n graph_.add_entity(start, end, label, score, score_norm)\n else:\n graph_.add_trigger(start, end, label, score, score_norm)\n beam_set_.append(graph_)\n beam_set = beam_set_\n\n # 2. edge step\n if is_entity_node:\n # add a new entity: new relations, new argument roles\n for i in range(entity_idx):\n # add relation edges\n edge_scores_1 = relation_type_scores[i * entity_num + entity_idx].tolist()\n edge_scores_2 = relation_type_scores[entity_idx * entity_num + i].tolist()\n edge_scores_norm_1 = normalize_score(edge_scores_1)\n edge_scores_norm_2 = normalize_score(edge_scores_2)\n\n if self.relation_directional:\n edge_scores = [(max(s1, s2), n2 if s1 < s2 else n1, i, s1 < s2)\n for i, (s1, s2, n1, n2)\n in enumerate(zip(edge_scores_1, edge_scores_2,\n edge_scores_norm_1,\n edge_scores_norm_2))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n else:\n edge_scores = [(max(s1, s2), n2 if s1 < n2 else n1, i, False)\n for i, (s1, s2, n1, n2)\n in enumerate(zip(edge_scores_1, edge_scores_2,\n edge_scores_norm_1,\n edge_scores_norm_2))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, score_norm, label, inverse in top_edge_scores:\n rel_cur_ent = label * 1000 + graph.entities[-1][-1]\n rel_pre_ent = label * 1000 + graph.entities[i][-1]\n if self.valid_relation_entity is not None and self.valid_relation_entity is not None:\n if label == 0 or (rel_pre_ent in self.valid_relation_entity and\n rel_cur_ent in self.valid_relation_entity):\n graph_ = graph.copy()\n if self.relation_directional and inverse:\n graph_.add_relation(entity_idx, i, label, score, score_norm)\n else:\n graph_.add_relation(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n else:\n if label == 0:\n graph_ = graph.copy()\n if self.relation_directional and inverse:\n graph_.add_relation(entity_idx, i, label, score, score_norm)\n else:\n graph_.add_relation(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_relation(i, entity_idx, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 200:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n\n for i in range(trigger_idx):\n # add argument role edges\n edge_scores = role_type_scores[i * entity_num + entity_idx].tolist()\n edge_scores_norm = normalize_score(edge_scores)\n edge_scores = [(s, i, n) for i, (s, n) in enumerate(zip(edge_scores, edge_scores_norm))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, label, score_norm in top_edge_scores:\n role_entity = label * 1000 + 
graph.entities[-1][-1]\n event_role = graph.triggers[i][-1] * 1000 + label\n if (self.valid_event_role is not None) and (self.valid_role_entity is not None):\n if label == 0 or (event_role in self.valid_event_role and\n role_entity in self.valid_role_entity):\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n else:\n if label == 0 :\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 100:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n beam_set = self.trim_beam_set(beam_set_, self.beam_size)\n\n else:\n # add a new trigger: new argument roles\n for i in range(entity_idx):\n edge_scores = role_type_scores[trigger_idx * entity_num + i].tolist()\n edge_scores_norm = normalize_score(edge_scores)\n edge_scores = [(s, i, n) for i, (s, n) in enumerate(zip(edge_scores,\n edge_scores_norm))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, label, score_norm in top_edge_scores:\n event_role = graph.triggers[-1][-1] * 1000 + label\n role_entity = label * 1000 + graph.entities[i][-1]\n if self.valid_event_role is not None and self.valid_role_entity is not None:\n if label == 0 or (event_role in self.valid_event_role\n and role_entity in self.valid_role_entity):\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n else:\n if label == 0:\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 100:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n\n beam_set = self.trim_beam_set(beam_set_, self.beam_size)\n\n if is_entity_node:\n entity_idx += 1\n else:\n trigger_idx += 1\n beam_set.sort(key=lambda x: self.compute_graph_score(x), reverse=True)\n graph = beam_set[0]\n\n # predict mention types\n _, mention_types = mention_type_scores.max(dim=1)\n mention_types = mention_types[:entity_idx]\n mention_list = [(i, j, l.item()) for (i, j, k), l\n in zip(graph.entities, mention_types)]\n graph.mentions = mention_list\n\n return graph" }, { "identifier": "IEDataset", "path": "TextEE/models/OneIE/data.py", "snippet": "class IEDataset(Dataset):\n def __init__(self, raw_data, tokenizer, max_length=128, gpu=False, ignore_title=False,\n relation_mask_self=True, relation_directional=False,\n coref=False, symmetric_relations=None, test=False):\n self.raw_data = raw_data\n self.data = []\n self.gpu = gpu\n self.max_length = max_length\n self.ignore_title = ignore_title\n self.relation_mask_self = relation_mask_self\n self.relation_directional = relation_directional\n self.coref = coref\n if symmetric_relations is None:\n self.symmetric_relations = set()\n else:\n self.symmetric_relations = symmetric_relations\n self.tokenizer = tokenizer\n self.test = test\n self.load_data()\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n return self.data[item]\n\n 
@property\n def entity_type_set(self):\n type_set = set()\n for inst in self.data:\n for entity in inst['entity_mentions']:\n type_set.add(entity.get('entity_type', \"UNK\"))\n return type_set\n\n @property\n def event_type_set(self):\n type_set = set()\n for inst in self.data:\n for event in inst['event_mentions']:\n type_set.add(event['event_type'])\n return type_set\n\n @property\n def relation_type_set(self):\n type_set = set()\n for inst in self.data:\n for relation in inst.get('relation_mentions', []):\n type_set.add(relation['relation_type'])\n return type_set\n\n @property\n def role_type_set(self):\n type_set = set()\n for inst in self.data:\n for event in inst['event_mentions']:\n for arg in event['arguments']:\n type_set.add(arg['role'])\n return type_set\n\n def load_data(self):\n overlength_num = 0\n for inst in self.raw_data:\n \n ## added\n pieces = [self.tokenizer.tokenize(t, is_split_into_words=True) for t in inst['tokens']]\n token_lens = [len(x) for x in pieces]\n if 0 in token_lens:\n raise ValueError\n pieces = [p for ps in pieces for p in ps]\n inst['pieces'] = pieces\n inst['token_lens'] = token_lens\n \n inst['entity_mentions'] = inst['extra_info']['entity_mentions']\n inst['relation_mentions'] = inst['extra_info']['relation_mentions']\n inst['event_mentions'] = inst['extra_info']['event_mentions']\n ##\n\n if not self.test:\n if self.max_length != -1 and len(pieces) > self.max_length - 2:\n overlength_num += 1\n continue\n else:\n if len(pieces) > self.max_length - 2:\n # add token_lens until over-length\n piece_counter = 0\n for max_token_include, token_len in enumerate(inst['token_lens']):\n if piece_counter + token_len >= self.max_length - 2:\n logger.info('overlength during testing...')\n break\n else:\n piece_counter += token_len\n inst['pieces'] = inst['pieces'][:piece_counter]\n inst['token_lens'] = inst['token_lens'][:max_token_include]\n inst['tokens'] = inst['tokens'][:max_token_include]\n self.data.append(inst)\n\n if overlength_num:\n logger.info('Discarded {} overlength instances'.format(overlength_num))\n logger.info('Loaded {} OneIE instances from {} E2E instances'.format(len(self), len(self.raw_data)))\n\n def numberize(self, tokenizer, vocabs):\n \"\"\"Numberize word pieces, labels, etcs.\n :param tokenizer: Bert tokenizer.\n :param vocabs (dict): a dict of vocabularies.\n \"\"\"\n entity_type_stoi = vocabs.get('entity_type', None)\n event_type_stoi = vocabs.get('event_type', None)\n relation_type_stoi = vocabs.get('relation_type', None)\n role_type_stoi = vocabs.get('role_type', None)\n mention_type_stoi = vocabs.get('mention_type', None)\n entity_label_stoi = vocabs.get('entity_label', None)\n trigger_label_stoi = vocabs.get('trigger_label', None)\n\n data = []\n for inst in self.data:\n doc_id = inst['doc_id']\n wnd_id = inst['wnd_id']\n tokens = inst['tokens']\n pieces = inst['pieces']\n sent_id = inst['wnd_id']\n entities = inst['entity_mentions']\n token_num = len(tokens)\n entities, entity_id_map = remove_overlap_entities(entities, token_num)\n entities.sort(key=lambda x: x['start'])\n events = inst['event_mentions']\n events.sort(key=lambda x: x['trigger']['start'])\n events = [eve for eve in events if eve['trigger']['end']<= token_num]\n relations = inst.get('relation_mentions', [])\n token_lens = inst['token_lens']\n\n # Pad word pieces with special tokens\n piece_idxs = tokenizer.encode(pieces,\n add_special_tokens=True,\n max_length=self.max_length,\n truncation=True)\n pad_num = self.max_length - len(piece_idxs)\n attn_mask = [1] * 
len(piece_idxs) + [0] * pad_num\n #piece_idxs = piece_idxs + [0] * pad_num\n pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)\n piece_idxs = piece_idxs + [pad_id] * pad_num\n \n # Entity\n # - entity_labels and entity_label_idxs are used for identification\n # - entity_types and entity_type_idxs are used for classification\n # - entity_list is used for graph representation\n entity_labels = get_entity_labels(entities, token_num)\n entity_label_idxs = [entity_label_stoi[l] for l in entity_labels]\n entity_types = [e.get('entity_type', \"UNK\") for e in entities]\n entity_type_idxs = [entity_type_stoi[l] for l in entity_types]\n entity_list = [(e['start'], e['end'], entity_type_stoi[e.get('entity_type', \"UNK\")])\n for e in entities]\n # entity_num = len(entity_list)\n mention_types = [e.get('mention_type', \"UNK\") for e in entities]\n mention_type_idxs = [mention_type_stoi[l] for l in mention_types]\n mention_list = [(i, j, l) for (i, j, k), l\n in zip(entity_list, mention_type_idxs)]\n\n # Trigger\n # - trigger_labels and trigger_label_idxs are used for identification\n # - event_types and event_type_idxs are used for classification\n # - trigger_list is used for graph representation\n trigger_labels = get_trigger_labels(events, token_num)\n if self.test:\n trigger_label_idxs = []\n for l in trigger_labels:\n if l in trigger_label_stoi.keys():\n trigger_label_idxs.append(trigger_label_stoi[l])\n else:\n trigger_label_idxs.append(trigger_label_stoi['O']) # we need this when xl test\n event_type_idxs = [event_type_stoi[e['event_type']] for e in events \n if (e['trigger']['end'] <= token_num) and (e['event_type'] in event_type_stoi.keys())]\n trigger_list = [(e['trigger']['start'], e['trigger']['end'],\n event_type_stoi[e['event_type']])\n for e in events if e['event_type'] in event_type_stoi.keys()]\n else:\n trigger_label_idxs = [trigger_label_stoi[l]\n for l in trigger_labels]\n event_type_idxs = [event_type_stoi[e['event_type']] for e in events]\n trigger_list = [(e['trigger']['start'], e['trigger']['end'],\n event_type_stoi[e['event_type']])\n for e in events]\n\n # Relation\n relation_types = get_relation_types(entities, relations,\n entity_id_map,\n self.test,\n relation_type_stoi,\n directional=self.relation_directional,\n symmetric=self.symmetric_relations)\n relation_type_idxs = [[relation_type_stoi[l] for l in ls]\n for ls in relation_types]\n if self.relation_mask_self:\n for i in range(len(relation_type_idxs)):\n relation_type_idxs[i][i] = -100\n relation_list = get_relation_list(entities, relations,\n entity_id_map, relation_type_stoi, self.test,\n directional=self.relation_directional,\n symmetric=self.symmetric_relations)\n #relation_type_idxs = []\n #relation_list = []\n\n # Argument role\n role_types = get_role_types(entities, events, entity_id_map, role_type_stoi, self.test)\n role_type_idxs = [[role_type_stoi[l] for l in ls]\n for ls in role_types]\n role_list = get_role_list(entities, events,\n entity_id_map, role_type_stoi, self.test)\n\n # Graph\n graph = Graph(\n entities=entity_list,\n triggers=trigger_list,\n relations=relation_list,\n roles=role_list,\n mentions=mention_list,\n vocabs=vocabs,\n )\n \n instance = Instance(\n doc_id=doc_id,\n wnd_id=wnd_id,\n sent_id=sent_id,\n tokens=tokens,\n pieces=pieces,\n piece_idxs=piece_idxs,\n token_lens=token_lens,\n attention_mask=attn_mask,\n entity_label_idxs=entity_label_idxs,\n trigger_label_idxs=trigger_label_idxs,\n entity_type_idxs=entity_type_idxs,\n event_type_idxs=event_type_idxs,\n 
relation_type_idxs=relation_type_idxs,\n mention_type_idxs=mention_type_idxs,\n role_type_idxs=role_type_idxs,\n graph=graph,\n entity_num=len(entities),\n trigger_num=len(events),\n )\n data.append(instance)\n self.data = data\n\n def collate_fn(self, batch):\n batch_piece_idxs = []\n batch_tokens = []\n batch_entity_labels, batch_trigger_labels = [], []\n batch_entity_types, batch_event_types = [], []\n batch_relation_types, batch_role_types = [], []\n batch_mention_types = []\n batch_graphs = []\n batch_token_lens = []\n batch_attention_masks = []\n\n sent_ids = [inst.sent_id for inst in batch]\n token_nums = [len(inst.tokens) for inst in batch]\n max_token_num = max(token_nums)\n\n max_entity_num = max([inst.entity_num for inst in batch] + [1])\n max_trigger_num = max([inst.trigger_num for inst in batch] + [1])\n \n doc_ids = [inst.doc_id for inst in batch]\n wnd_ids = [inst.wnd_id for inst in batch]\n\n for inst in batch:\n token_num = len(inst.tokens)\n batch_piece_idxs.append(inst.piece_idxs)\n batch_attention_masks.append(inst.attention_mask)\n batch_token_lens.append(inst.token_lens)\n batch_graphs.append(inst.graph)\n batch_tokens.append(inst.tokens)\n # for identification\n batch_entity_labels.append(inst.entity_label_idxs +\n [0] * (max_token_num - token_num))\n batch_trigger_labels.append(inst.trigger_label_idxs +\n [0] * (max_token_num - token_num))\n # for classification\n batch_entity_types.extend(inst.entity_type_idxs +\n [-100] * (max_entity_num - inst.entity_num))\n batch_event_types.extend(inst.event_type_idxs +\n [-100] * (max_trigger_num - inst.trigger_num))\n batch_mention_types.extend(inst.mention_type_idxs +\n [-100] * (max_entity_num - inst.entity_num))\n for l in inst.relation_type_idxs:\n batch_relation_types.extend(\n l + [-100] * (max_entity_num - inst.entity_num))\n batch_relation_types.extend(\n [-100] * max_entity_num * (max_entity_num - inst.entity_num))\n for l in inst.role_type_idxs:\n batch_role_types.extend(\n l + [-100] * (max_entity_num - inst.entity_num))\n batch_role_types.extend(\n [-100] * max_entity_num * (max_trigger_num - inst.trigger_num))\n\n if self.gpu:\n batch_piece_idxs = torch.cuda.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.cuda.FloatTensor(\n batch_attention_masks)\n\n batch_entity_labels = torch.cuda.LongTensor(batch_entity_labels)\n batch_trigger_labels = torch.cuda.LongTensor(batch_trigger_labels)\n batch_entity_types = torch.cuda.LongTensor(batch_entity_types)\n batch_mention_types = torch.cuda.LongTensor(batch_mention_types)\n batch_event_types = torch.cuda.LongTensor(batch_event_types)\n batch_relation_types = torch.cuda.LongTensor(batch_relation_types)\n batch_role_types = torch.cuda.LongTensor(batch_role_types)\n\n token_nums = torch.cuda.LongTensor(token_nums)\n else:\n batch_piece_idxs = torch.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.FloatTensor(batch_attention_masks)\n\n batch_entity_labels = torch.LongTensor(batch_entity_labels)\n batch_trigger_labels = torch.LongTensor(batch_trigger_labels)\n batch_entity_types = torch.LongTensor(batch_entity_types)\n batch_mention_types = torch.LongTensor(batch_mention_types)\n batch_event_types = torch.LongTensor(batch_event_types)\n batch_relation_types = torch.LongTensor(batch_relation_types)\n batch_role_types = torch.LongTensor(batch_role_types)\n\n token_nums = torch.LongTensor(token_nums)\n\n return Batch(\n doc_ids=doc_ids,\n wnd_ids=wnd_ids,\n sent_ids=sent_ids,\n tokens=[inst.tokens for inst in batch],\n piece_idxs=batch_piece_idxs,\n 
token_lens=batch_token_lens,\n attention_masks=batch_attention_masks,\n entity_label_idxs=batch_entity_labels,\n trigger_label_idxs=batch_trigger_labels,\n entity_type_idxs=batch_entity_types,\n mention_type_idxs=batch_mention_types,\n event_type_idxs=batch_event_types,\n relation_type_idxs=batch_relation_types,\n role_type_idxs=batch_role_types,\n graphs=batch_graphs,\n token_nums=token_nums,\n )" }, { "identifier": "generate_vocabs", "path": "TextEE/models/OneIE/util.py", "snippet": "def generate_vocabs(datasets, coref=False,\n relation_directional=False,\n symmetric_relations=None):\n \"\"\"Generate vocabularies from a list of data sets\n :param datasets (list): A list of data sets\n :return (dict): A dictionary of vocabs\n \"\"\"\n entity_type_set = set()\n event_type_set = set()\n relation_type_set = set()\n role_type_set = set()\n for dataset in datasets:\n entity_type_set.update(dataset.entity_type_set)\n event_type_set.update(dataset.event_type_set)\n relation_type_set.update(dataset.relation_type_set)\n role_type_set.update(dataset.role_type_set)\n\n # add inverse relation types for non-symmetric relations\n if relation_directional:\n if symmetric_relations is None:\n symmetric_relations = []\n relation_type_set_ = set()\n for relation_type in relation_type_set:\n relation_type_set_.add(relation_type)\n if relation_directional and relation_type not in symmetric_relations:\n relation_type_set_.add(relation_type + '_inv')\n\n # entity and trigger labels\n prefix = ['B', 'I']\n entity_label_stoi = {'O': 0}\n trigger_label_stoi = {'O': 0}\n for t in entity_type_set:\n for p in prefix:\n entity_label_stoi['{}-{}'.format(p, t)] = len(entity_label_stoi)\n for t in event_type_set:\n for p in prefix:\n trigger_label_stoi['{}-{}'.format(p, t)] = len(trigger_label_stoi)\n\n entity_type_stoi = {k: i for i, k in enumerate(entity_type_set, 1)}\n entity_type_stoi['O'] = 0\n\n event_type_stoi = {k: i for i, k in enumerate(event_type_set, 1)}\n event_type_stoi['O'] = 0\n\n relation_type_stoi = {k: i for i, k in enumerate(relation_type_set, 1)}\n relation_type_stoi['O'] = 0\n if coref:\n relation_type_stoi['COREF'] = len(relation_type_stoi)\n\n role_type_stoi = {k: i for i, k in enumerate(role_type_set, 1)}\n role_type_stoi['O'] = 0\n\n mention_type_stoi = {'NAM': 0, 'NOM': 1, 'PRO': 2, 'UNK': 3}\n\n return {\n 'entity_type': entity_type_stoi,\n 'event_type': event_type_stoi,\n 'relation_type': relation_type_stoi,\n 'role_type': role_type_stoi,\n 'mention_type': mention_type_stoi,\n 'entity_label': entity_label_stoi,\n 'trigger_label': trigger_label_stoi,\n }" }, { "identifier": "load_valid_patterns", "path": "TextEE/models/OneIE/util.py", "snippet": "def load_valid_patterns(path, vocabs):\n if path is None:\n print('valid pattern path not exists, we do not apply valid pattern for decoding')\n return None\n event_type_vocab = vocabs['event_type']\n entity_type_vocab = vocabs['entity_type']\n relation_type_vocab = vocabs['relation_type']\n role_type_vocab = vocabs['role_type']\n\n # valid event-role\n valid_event_role = set()\n event_role = json.load(\n open(os.path.join(path, 'event_role.json'), 'r', encoding='utf-8'))\n for event, roles in event_role.items():\n if event not in event_type_vocab:\n continue\n event_type_idx = event_type_vocab[event]\n for role in roles:\n if role not in role_type_vocab:\n continue\n role_type_idx = role_type_vocab[role]\n valid_event_role.add(event_type_idx * 1000 + role_type_idx)\n\n # valid relation-entity\n valid_relation_entity = set()\n # relation_entity = 
json.load(\n # open(os.path.join(path, 'relation_entity.json'), 'r', encoding='utf-8'))\n # for relation, entities in relation_entity.items():\n # relation_type_idx = relation_type_vocab[relation]\n # for entity in entities:\n # entity_type_idx = entity_type_vocab[entity]\n # valid_relation_entity.add(\n # relation_type_idx * 1000 + entity_type_idx)\n\n # valid role-entity\n valid_role_entity = set()\n role_entity = json.load(\n open(os.path.join(path, 'role_entity.json'), 'r', encoding='utf-8'))\n for role, entities in role_entity.items():\n if role not in role_type_vocab:\n continue\n role_type_idx = role_type_vocab[role]\n for entity in entities:\n entity_type_idx = entity_type_vocab[entity]\n valid_role_entity.add(role_type_idx * 1000 + entity_type_idx)\n\n return {\n 'event_role': valid_event_role,\n 'relation_entity': valid_relation_entity,\n 'role_entity': valid_role_entity\n }" }, { "identifier": "save_result", "path": "TextEE/models/OneIE/util.py", "snippet": "def save_result(output_file, gold_graphs, pred_graphs, sent_ids, tokens=None):\n with open(output_file, 'w', encoding='utf-8') as w:\n for i, (gold_graph, pred_graph, sent_id) in enumerate(\n zip(gold_graphs, pred_graphs, sent_ids)):\n output = {'sent_id': sent_id,\n 'gold': gold_graph.to_dict(),\n 'pred': pred_graph.to_dict()}\n if tokens:\n output['tokens'] = tokens[i]\n w.write(json.dumps(output) + '\\n')" }, { "identifier": "best_score_by_task", "path": "TextEE/models/OneIE/util.py", "snippet": "def best_score_by_task(log_file, task, max_epoch=1000):\n with open(log_file, 'r', encoding='utf-8') as r:\n config = r.readline()\n\n best_scores = []\n best_dev_score = 0\n for line in r:\n record = json.loads(line)\n dev = record['dev']\n #test = record['test']\n test = record.get('test', None)\n epoch = record['epoch']\n if epoch > max_epoch:\n break\n if dev[task]['f'] > best_dev_score:\n best_dev_score = dev[task]['f']\n best_scores = [dev, test, epoch]\n\n print('Epoch: {}'.format(best_scores[-1]))\n tasks = ['entity', 'mention', 'relation', 'trigger_id', 'trigger',\n 'role_id', 'role']\n for t in tasks:\n print('{}: dev: {:.2f}, test: {:.2f}'.format(t,\n best_scores[0][t][\n 'f'] * 100.0,\n best_scores[1][t][\n 'f'] * 100.0))" }, { "identifier": "score_graphs", "path": "TextEE/models/OneIE/scorer.py", "snippet": "def score_graphs(gold_graphs, pred_graphs,\n relation_directional=False):\n gold_arg_num = pred_arg_num = arg_idn_num = arg_class_num = 0\n gold_trigger_num = pred_trigger_num = trigger_idn_num = trigger_class_num = 0\n gold_ent_num = pred_ent_num = ent_match_num = 0\n gold_rel_num = pred_rel_num = rel_match_num = 0\n gold_men_num = pred_men_num = men_match_num = 0\n\n for gold_graph, pred_graph in zip(gold_graphs, pred_graphs):\n # Entity\n gold_entities = gold_graph.entities\n pred_entities = pred_graph.entities\n gold_ent_num += len(gold_entities)\n pred_ent_num += len(pred_entities)\n ent_match_num += len([entity for entity in pred_entities\n if entity in gold_entities])\n\n # Mention\n gold_mentions = gold_graph.mentions\n pred_mentions = pred_graph.mentions\n gold_men_num += len(gold_mentions)\n pred_men_num += len(pred_mentions)\n men_match_num += len([mention for mention in pred_mentions\n if mention in gold_mentions])\n\n # Relation\n gold_relations = gold_graph.relations\n pred_relations = pred_graph.relations\n gold_rel_num += len(gold_relations)\n pred_rel_num += len(pred_relations)\n for arg1, arg2, rel_type in pred_relations:\n arg1_start, arg1_end, _ = pred_entities[arg1]\n arg2_start, arg2_end, _ 
= pred_entities[arg2]\n for arg1_gold, arg2_gold, rel_type_gold in gold_relations:\n arg1_start_gold, arg1_end_gold, _ = gold_entities[arg1_gold]\n arg2_start_gold, arg2_end_gold, _ = gold_entities[arg2_gold]\n if relation_directional:\n if (arg1_start == arg1_start_gold and\n arg1_end == arg1_end_gold and\n arg2_start == arg2_start_gold and\n arg2_end == arg2_end_gold\n ) and rel_type == rel_type_gold:\n rel_match_num += 1\n break\n else:\n if ((arg1_start == arg1_start_gold and\n arg1_end == arg1_end_gold and\n arg2_start == arg2_start_gold and\n arg2_end == arg2_end_gold) or (\n arg1_start == arg2_start_gold and\n arg1_end == arg2_end_gold and\n arg2_start == arg1_start_gold and\n arg2_end == arg1_end_gold\n )) and rel_type == rel_type_gold:\n rel_match_num += 1\n break\n\n # Trigger\n gold_triggers = gold_graph.triggers\n pred_triggers = pred_graph.triggers\n gold_trigger_num += len(gold_triggers)\n pred_trigger_num += len(pred_triggers)\n for trg_start, trg_end, event_type in pred_triggers:\n matched = [item for item in gold_triggers\n if item[0] == trg_start and item[1] == trg_end]\n if matched:\n trigger_idn_num += 1\n if matched[0][-1] == event_type:\n trigger_class_num += 1\n\n # Argument\n gold_args = convert_arguments(gold_triggers, gold_entities,\n gold_graph.roles)\n pred_args = convert_arguments(pred_triggers, pred_entities,\n pred_graph.roles)\n gold_arg_num += len(gold_args)\n pred_arg_num += len(pred_args)\n for pred_arg in pred_args:\n arg_start, arg_end, event_type, role = pred_arg\n gold_idn = {item for item in gold_args\n if item[0] == arg_start and item[1] == arg_end\n and item[2] == event_type}\n if gold_idn:\n arg_idn_num += 1\n gold_class = {item for item in gold_idn if item[-1] == role}\n if gold_class:\n arg_class_num += 1\n\n entity_prec, entity_rec, entity_f = compute_f1(\n pred_ent_num, gold_ent_num, ent_match_num)\n mention_prec, mention_rec, mention_f = compute_f1(\n pred_men_num, gold_men_num, men_match_num)\n trigger_id_prec, trigger_id_rec, trigger_id_f = compute_f1(\n pred_trigger_num, gold_trigger_num, trigger_idn_num)\n trigger_prec, trigger_rec, trigger_f = compute_f1(\n pred_trigger_num, gold_trigger_num, trigger_class_num)\n relation_prec, relation_rec, relation_f = compute_f1(\n pred_rel_num, gold_rel_num, rel_match_num)\n role_id_prec, role_id_rec, role_id_f = compute_f1(\n pred_arg_num, gold_arg_num, arg_idn_num)\n role_prec, role_rec, role_f = compute_f1(\n pred_arg_num, gold_arg_num, arg_class_num)\n\n print('Entity: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n entity_prec * 100.0, entity_rec * 100.0, entity_f * 100.0))\n print('Mention: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n mention_prec * 100.0, mention_rec * 100.0, mention_f * 100.0))\n print('Trigger identification: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n trigger_id_prec * 100.0, trigger_id_rec * 100.0, trigger_id_f * 100.0))\n print('Trigger: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n trigger_prec * 100.0, trigger_rec * 100.0, trigger_f * 100.0))\n print('Relation: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n relation_prec * 100.0, relation_rec * 100.0, relation_f * 100.0))\n print('Role identification: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n role_id_prec * 100.0, role_id_rec * 100.0, role_id_f * 100.0))\n print('Role: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n role_prec * 100.0, role_rec * 100.0, role_f * 100.0))\n\n scores = {\n 'entity': {'prec': entity_prec, 'rec': entity_rec, 'f': entity_f},\n 'mention': {'prec': mention_prec, 'rec': mention_rec, 'f': mention_f},\n 'trigger': 
{'prec': trigger_prec, 'rec': trigger_rec, 'f': trigger_f},\n 'trigger_id': {'prec': trigger_id_prec, 'rec': trigger_id_rec,\n 'f': trigger_id_f},\n 'role': {'prec': role_prec, 'rec': role_rec, 'f': role_f},\n 'role_id': {'prec': role_id_prec, 'rec': role_id_rec, 'f': role_id_f},\n 'relation': {'prec': relation_prec, 'rec': relation_rec,\n 'f': relation_f}\n }\n return scores" } ]
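Note: score_graphs in the context above reduces every task to (predicted, gold, matched) counts and hands them to compute_f1, whose definition is not part of this crop. Below is a minimal sketch of the precision/recall/F1 computation its call sites imply; the actual TextEE helper may handle edge cases differently.

```python
def compute_f1(predicted: int, gold: int, matched: int):
    # Standard micro P/R/F1 from counts, guarding against empty denominators.
    precision = matched / predicted if predicted > 0 else 0.0
    recall = matched / gold if gold > 0 else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
    return precision, recall, f1
```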
import os, sys, logging, tqdm, pprint, copy
import torch
import numpy as np
import ipdb
from transformers import (BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, AutoTokenizer,
                          AdamW, get_linear_schedule_with_warmup)
from torch.utils.data import DataLoader
from torch.optim import AdamW  # note: shadows the deprecated transformers AdamW imported above
from ..trainer import BasicTrainer
from .E2Emodel import OneIEE2EModel
from .data import IEDataset
from .util import generate_vocabs, load_valid_patterns, save_result, best_score_by_task
from .scorer import score_graphs
from scorer import compute_f1, print_scores
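The import statement pulls in AdamW and get_linear_schedule_with_warmup, but the crop ends before they are used. A hypothetical sketch (toy model and step counts, not the OneIE trainer's actual setup) of how that pair is conventionally wired up:

```python
import torch
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(8, 2)          # stand-in for OneIEE2EModel
optimizer = AdamW(model.parameters(), lr=1e-3)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=1000)

for step in range(1000):
    # loss.backward() would run here in real training
    optimizer.step()
    scheduler.step()                   # linear warmup, then linear decay to 0
```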
15302
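The collate_fn in the context above pads token-level labels with 0 but pads classification targets with -100. A minimal illustration (toy shapes, not TextEE code) of why -100 works as the pad value:

```python
import torch
import torch.nn.functional as F

labels = [[2, 5, 1], [3, 4]]                       # hypothetical per-instance targets
max_len = max(len(l) for l in labels)
targets = torch.tensor([l + [-100] * (max_len - len(l)) for l in labels])

logits = torch.randn(2, max_len, 6)                # hypothetical scores over 6 classes
loss = F.cross_entropy(logits.view(-1, 6), targets.view(-1))
# cross_entropy defaults to ignore_index=-100, so padded positions add no loss.
```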
logger = logging.getLogger(__name__)

class OneIEE2ETrainer(BasicTrainer):
    def __init__(self, config, type_set=None):
        super().__init__(config, type_set)
        self.tokenizer = None
        self.model = None
        self.valid_patterns = None

    @classmethod
    def add_extra_info_fn(cls, instances, raw_data, config):
        extra_info_map = {}
        for dt in raw_data:
            extra_info = {
                "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [],
                "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [],
                "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [],
            }
            extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info
        for instance in instances:
            instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])]
        return instances

    def load_tokenizer_(self, checkpoint=None):
        if checkpoint:
            logger.info(f"Loading tokenizer from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.tokenizer"))
            self.tokenizer = state["tokenizer"]
        else:
            logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}")
            if self.config.pretrained_model_name.startswith('bert-'):
                self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('roberta-'):
                self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('xlm-roberta-'):
                self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            else:
                self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False)

    def load_model_(self, checkpoint=None):
        assert self.tokenizer
        if checkpoint:
            logger.info(f"Loading model from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}')
            self.vocabs = state["vocabs"]
            self.type_set = state["type_set"]
            self.valid_patterns = state["valid_patterns"]
            self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns)
            self.model.load_state_dict(state['model'])
            self.model.cuda(device=self.config.gpu_device)
        else:
logger = logging.getLogger(__name__)

class OneIEE2ETrainer(BasicTrainer):
    def __init__(self, config, type_set=None):
        super().__init__(config, type_set)
        self.tokenizer = None
        self.model = None
        self.valid_patterns = None

    @classmethod
    def add_extra_info_fn(cls, instances, raw_data, config):
        extra_info_map = {}
        for dt in raw_data:
            extra_info = {
                "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [],
                "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [],
                "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [],
            }
            extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info
        for instance in instances:
            instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])]
        return instances

    def load_tokenizer_(self, checkpoint=None):
        if checkpoint:
            logger.info(f"Loading tokenizer from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.tokenizer"))
            self.tokenizer = state["tokenizer"]
        else:
            logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}")
            if self.config.pretrained_model_name.startswith('bert-'):
                self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('roberta-'):
                self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            elif self.config.pretrained_model_name.startswith('xlm-roberta-'):
                self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir)
            else:
                self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False)

    def load_model_(self, checkpoint=None):
        assert self.tokenizer
        if checkpoint:
            logger.info(f"Loading model from {checkpoint}")
            state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}')
            self.vocabs = state["vocabs"]
            self.type_set = state["type_set"]
            self.valid_patterns = state["valid_patterns"]
            self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns)
            self.model.load_state_dict(state['model'])
            self.model.cuda(device=self.config.gpu_device)
        else:
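load_model_ above restores a best_model.state file with vocabs, type_set, valid_patterns, and model keys; the matching save routine falls outside this crop. An inferred sketch of what it presumably looks like, based only on the keys read back:

```python
import os
import torch

def save_checkpoint(trainer, output_dir: str):
    # Keys mirror what load_model_ reads; inferred, not the actual OneIE code.
    state = {
        "vocabs": trainer.vocabs,
        "type_set": trainer.type_set,
        "valid_patterns": trainer.valid_patterns,
        "model": trainer.model.state_dict(),
    }
    torch.save(state, os.path.join(output_dir, "best_model.state"))
```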
self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs)
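The next_line completes the else (no-checkpoint) branch by loading valid patterns from the configured path rather than from a checkpoint. In the same setup, generate_vocabs in the context above builds BIO label vocabularies: each entity/event type gets a B- and an I- tag, with 'O' reserved as index 0. A small worked example (types sorted here only for determinism; the original iterates an unordered set):

```python
entity_types = {"PER", "ORG"}
entity_label_stoi = {"O": 0}
for t in sorted(entity_types):
    for p in ("B", "I"):
        entity_label_stoi[f"{p}-{t}"] = len(entity_label_stoi)
# -> {'O': 0, 'B-ORG': 1, 'I-ORG': 2, 'B-PER': 3, 'I-PER': 4}
```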
4
2023-11-15 21:32:56+00:00
24k
ahayler/s4c
models/bts/trainer_overfit.py
[ { "identifier": "make_datasets", "path": "datasets/data_util.py", "snippet": "def make_datasets(config):\n type = config.get(\"type\", \"KITTI_Raw\")\n if type == \"KITTI_Odometry\":\n train_dataset = KittiOdometryDataset(\n base_path=config[\"data_path\"],\n frame_count=config.get(\"data_fc\", 1),\n target_image_size=config.get(\"image_size\", (128, 256)),\n return_stereo=config.get(\"data_stereo\", False),\n sequences=config.get(\"train_sequences\", (\"00\",)),\n custom_pose_path=config.get(\"custom_pose_path\", None),\n keyframe_offset=0 #-(config.get(\"data_fc\", 1) // 2)\n )\n test_dataset = KittiOdometryDataset(\n base_path=config[\"data_path\"],\n frame_count=config.get(\"data_fc\", 1),\n target_image_size=config.get(\"image_size\", (128, 256)),\n return_stereo=config.get(\"data_stereo\", False),\n sequences=config.get(\"val_sequences\", (\"00\",)),\n custom_pose_path=config.get(\"custom_pose_path\", None),\n keyframe_offset=0 #-(config.get(\"data_fc\", 1) // 2)\n )\n return train_dataset, test_dataset\n\n elif type == \"KITTI_Raw\":\n train_dataset = KittiRawDataset(\n data_path=config[\"data_path\"],\n pose_path=config[\"pose_path\"],\n split_path=os.path.join(config[\"split_path\"], \"train_files.txt\"),\n target_image_size=config.get(\"image_size\", (192, 640)),\n frame_count=config.get(\"data_fc\", 1),\n return_stereo=config.get(\"data_stereo\", False),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n dilation=config.get(\"dilation\", 1),\n color_aug=config.get(\"color_aug\", False)\n )\n test_dataset = KittiRawDataset(\n data_path=config[\"data_path\"],\n pose_path=config[\"pose_path\"],\n split_path=os.path.join(config[\"split_path\"], \"val_files.txt\"),\n target_image_size=config.get(\"image_size\", (192, 640)),\n frame_count=config.get(\"data_fc\", 1),\n return_stereo=config.get(\"data_stereo\", False),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n dilation=config.get(\"dilation\", 1),\n )\n return train_dataset, test_dataset\n\n elif type == \"KITTI_360\":\n if config.get(\"split_path\", None) is None:\n train_split_path = None\n test_split_path = None\n else:\n train_split_path = os.path.join(config[\"split_path\"], \"train_files.txt\")\n test_split_path = os.path.join(config[\"split_path\"], \"val_files.txt\")\n\n train_dataset = Kitti360Dataset(\n data_path=config[\"data_path\"],\n data_segmentation_path=config.get(\"data_segmentation_path\", None),\n pose_path=config[\"pose_path\"],\n split_path=train_split_path,\n target_image_size=tuple(config.get(\"image_size\", (192, 640))),\n frame_count=config.get(\"data_fc\", 3),\n return_stereo=config.get(\"data_stereo\", True),\n return_fisheye=config.get(\"data_fisheye\", True),\n return_3d_bboxes=config.get(\"data_3d_bboxes\", False),\n return_segmentation=config.get(\"data_segmentation\", False),\n segmentation_mode=config.get(\"segmentation_mode\", None),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n dilation=config.get(\"dilation\", 1),\n fisheye_rotation=config.get(\"fisheye_rotation\", 0),\n fisheye_offset=config.get(\"fisheye_offset\", 1),\n color_aug=config.get(\"color_aug\", False),\n is_preprocessed=config.get(\"is_preprocessed\", False),\n load_kitti_360_segmentation_gt=False,\n constrain_to_datapoints=config.get(\"constrain_to_datapoints\", False),\n additional_random_front_offset=config.get(\"additional_random_front_offset\", False)\n )\n test_dataset = Kitti360Dataset(\n data_path=config[\"data_path\"],\n data_segmentation_path=config.get(\"data_segmentation_path\", None),\n 
pose_path=config[\"pose_path\"],\n split_path=test_split_path,\n target_image_size=tuple(config.get(\"image_size\", (192, 640))),\n frame_count=config.get(\"data_fc\", 3),\n return_stereo=config.get(\"data_stereo\", True),\n return_fisheye=config.get(\"data_fisheye\", True),\n return_3d_bboxes=config.get(\"data_3d_bboxes\", False),\n return_segmentation=config.get(\"data_segmentation\", False),\n segmentation_mode=config.get(\"segmentation_mode\", None),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n fisheye_rotation=config.get(\"fisheye_rotation\", 0),\n fisheye_offset=config.get(\"fisheye_offset\", 1),\n dilation=config.get(\"dilation\", 1),\n is_preprocessed=config.get(\"is_preprocessed\", False),\n load_kitti_360_segmentation_gt=True,\n constrain_to_datapoints=config.get(\"constrain_to_datapoints\", False),\n additional_random_front_offset=config.get(\"additional_random_front_offset\", False)\n )\n return train_dataset, test_dataset\n\n elif type == \"RealEstate10k\":\n train_dataset = RealEstate10kDataset(\n data_path=config[\"data_path\"],\n split_path=None,\n target_image_size=config.get(\"image_size\", (256, 384)),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=0, #-(config.get(\"data_fc\", 1) // 2),\n dilation=config.get(\"dilation\", 10),\n color_aug=config.get(\"color_aug\", False)\n )\n test_dataset = RealEstate10kDataset(\n data_path=config[\"data_path\"],\n split_path=os.path.join(config[\"split_path\"], \"val_files.txt\"),\n target_image_size=config.get(\"image_size\", (256, 384)),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=0, #-(config.get(\"data_fc\", 1) // 2),\n dilation=config.get(\"dilation\", 10),\n color_aug=False\n )\n return train_dataset, test_dataset\n\n elif type == \"Waymo\":\n if config.get(\"split_path\", None) is None:\n train_split_path = None\n test_split_path = None\n else:\n train_split_path = os.path.join(config[\"split_path\"], \"train_files.txt\")\n test_split_path = os.path.join(config[\"split_path\"], \"val_files.txt\")\n\n train_dataset = WaymoDataset(\n data_path=config[\"data_path\"],\n mode=\"training\",\n split_path=train_split_path,\n target_image_size=tuple(config.get(\"image_size\", (320, 480))),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n return_45=config.get(\"return_45\", True),\n return_90=config.get(\"return_90\", True),\n offset_45=config.get(\"offset_45\", 5),\n offset_90=config.get(\"offset_90\", 10),\n dilation=config.get(\"dilation\", 1),\n color_aug=config.get(\"color_aug\", True),\n correct_exposure=config.get(\"correct_exposure\", True),\n )\n test_dataset = WaymoDataset(\n data_path=config[\"data_path\"],\n mode=\"validation\",\n split_path=test_split_path,\n target_image_size=tuple(config.get(\"image_size\", (320, 480))),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n return_45=config.get(\"return_45\", True),\n return_90=config.get(\"return_90\", True),\n offset_45=config.get(\"offset_45\", 5),\n offset_90=config.get(\"offset_90\", 10),\n dilation=config.get(\"dilation\", 1),\n color_aug=False,\n return_depth=True,\n correct_exposure=config.get(\"correct_exposure\", True),\n )\n return train_dataset, test_dataset\n\n else:\n raise NotImplementedError(f\"Unsupported dataset type: {type}\")" }, { "identifier": "make_scheduler", "path": "models/common/model/scheduler.py", "snippet": "def make_scheduler(config, optim):\n type = config.get(\"type\", \"fix\")\n if type == \"fix\":\n 
scheduler = FixLR(optim)\n return scheduler\n elif type == \"step\":\n scheduler = StepLR(\n optim,\n config[\"step_size\"],\n config[\"gamma\"]\n )\n return scheduler\n else:\n raise NotImplementedError(f\"Unknown learning rate scheduler type: {type}\")" }, { "identifier": "NeRFRenderer", "path": "models/common/render/nerf.py", "snippet": "class NeRFRenderer(torch.nn.Module):\n \"\"\"\n NeRF differentiable renderer\n :param n_coarse number of coarse (binned uniform) samples\n :param n_fine number of fine (importance) samples\n :param n_fine_depth number of expected depth samples\n :param noise_std noise to add to sigma. We do not use it\n :param depth_std noise for depth samples\n :param eval_batch_size ray batch size for evaluation\n :param white_bkgd if true, background color is white; else black\n :param lindisp if to use samples linear in disparity instead of distance\n :param sched ray sampling schedule. list containing 3 lists of equal length.\n sched[0] is list of iteration numbers,\n sched[1] is list of coarse sample numbers,\n sched[2] is list of fine sample numbers\n \"\"\"\n\n def __init__(\n self,\n n_coarse=128,\n n_fine=0,\n n_fine_depth=0,\n noise_std=0.0,\n depth_std=0.01,\n eval_batch_size=100000,\n white_bkgd=False,\n lindisp=False,\n sched=None, # ray sampling schedule for coarse and fine rays\n hard_alpha_cap=False\n ):\n super().__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n\n self.noise_std = noise_std\n self.depth_std = depth_std\n\n self.eval_batch_size = eval_batch_size\n self.white_bkgd = white_bkgd\n self.lindisp = lindisp\n if lindisp:\n print(\"Using linear displacement rays\")\n self.using_fine = n_fine > 0\n self.sched = sched\n if sched is not None and len(sched) == 0:\n self.sched = None\n self.register_buffer(\n \"iter_idx\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.register_buffer(\n \"last_sched\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.hard_alpha_cap = hard_alpha_cap\n\n def sample_coarse(self, rays):\n \"\"\"\n Stratified sampling. 
Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :return (B, Kc)\n \"\"\"\n device = rays.device\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n\n step = 1.0 / self.n_coarse\n B = rays.shape[0]\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).repeat(B, 1) # (B, Kc)\n z_steps += torch.rand_like(z_steps) * step\n if not self.lindisp: # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n return 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kc)\n\n def sample_coarse_from_dist(self, rays, weights, z_samp):\n device = rays.device\n B = rays.shape[0]\n\n num_bins = weights.shape[-1]\n num_samples = self.n_coarse\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(B, num_samples, dtype=torch.float32, device=device) # (B, Kf)\n interval_ids = torch.searchsorted(cdf, u, right=True) - 1 # (B, Kf)\n interval_ids = torch.clamp(interval_ids, 0, num_samples-1)\n interval_interp = torch.rand_like(interval_ids, dtype=torch.float32)\n\n # z_samps describe the centers of the respective histogram bins. Therefore, we have to extend them to the left and right\n if self.lindisp:\n z_samp = 1 / z_samp\n\n centers = .5 * (z_samp[:, 1:] + z_samp[:, :-1])\n interval_borders = torch.cat((z_samp[:, :1], centers, z_samp[:, -1:]), dim=-1)\n\n left_border = torch.gather(interval_borders, dim=-1, index=interval_ids)\n right_border = torch.gather(interval_borders, dim=-1, index=interval_ids+1)\n\n z_samp_new = left_border * (1 - interval_interp) + right_border * interval_interp\n\n if self.lindisp:\n z_samp_new = 1 / z_samp_new\n\n assert not torch.any(torch.isnan(z_samp_new))\n\n return z_samp_new\n\n def sample_fine(self, rays, weights):\n \"\"\"min\n Weighted stratified (importance) sample\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param weights (B, Kc)\n :return (B, Kf-Kfd)\n \"\"\"\n device = rays.device\n B = rays.shape[0]\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(\n B, self.n_fine - self.n_fine_depth, dtype=torch.float32, device=device\n ) # (B, Kf)\n inds = torch.searchsorted(cdf, u, right=True).float() - 1.0 # (B, Kf)\n inds = torch.clamp_min(inds, 0.0)\n\n z_steps = (inds + torch.rand_like(inds)) / self.n_coarse # (B, Kf)\n\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n if not self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param depth (B)\n :return (B, Kfd)\n \"\"\"\n z_samp = depth.unsqueeze(1).repeat((1, 
self.n_fine_depth))\n z_samp += torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n z_samp = torch.max(torch.min(z_samp, rays[:, -1:]), rays[:, -2:-1])\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def composite(self, model, rays, z_samp, coarse=True, sb=0, predict_segmentation=False):\n \"\"\"\n Render RGB and depth for each ray using NeRF alpha-compositing formula,\n given sampled positions along each ray (see sample_*)\n :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))\n should also support 'coarse' boolean argument\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param z_samp z positions sampled for each ray (B, K)\n :param coarse whether to evaluate using coarse NeRF\n :param predict_segmentation if true also predict the semantic distribution\n :param sb super-batch dimension; 0 = disable\n :return weights (B, K), rgb (B, 3), depth (B)\n \"\"\"\n with profiler.record_function(\"renderer_composite\"):\n B, K = z_samp.shape\n\n deltas = z_samp[:, 1:] - z_samp[:, :-1] # (B, K-1)\n delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # infty (B, 1)\n # delta_inf = rays[:, -1:] - z_samp[:, -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (B, K)\n\n # (B, K, 3)\n points = rays[:, None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]\n points = points.reshape(-1, 3) # (B*K, 3)\n\n use_viewdirs = hasattr(model, \"use_viewdirs\") and model.use_viewdirs\n\n rgbs_all, invalid_all, sigmas_all, segs_all = [], [], [], []\n if sb > 0:\n points = points.reshape(\n sb, -1, 3\n ) # (SB, B'*K, 3) B' is real ray batch size\n eval_batch_size = (self.eval_batch_size - 1) // sb + 1\n eval_batch_dim = 1\n else:\n eval_batch_size = self.eval_batch_size\n eval_batch_dim = 0\n\n split_points = torch.split(points, eval_batch_size, dim=eval_batch_dim)\n if use_viewdirs:\n dim1 = K\n viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1) # (B, K, 3)\n if sb > 0:\n viewdirs = viewdirs.reshape(sb, -1, 3) # (SB, B'*K, 3)\n else:\n viewdirs = viewdirs.reshape(-1, 3) # (B*K, 3)\n split_viewdirs = torch.split(\n viewdirs, eval_batch_size, dim=eval_batch_dim\n )\n for pnts, dirs in zip(split_points, split_viewdirs):\n rgbs, invalid, sigmas = model(pnts, coarse=coarse, viewdirs=dirs)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n else:\n for pnts in split_points:\n if predict_segmentation:\n rgbs, invalid, sigmas, segs = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n segs_all.append(segs)\n else:\n rgbs, invalid, sigmas = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n points = None\n viewdirs = None\n # (B*K, 4) OR (SB, B'*K, 4)\n rgbs = torch.cat(rgbs_all, dim=eval_batch_dim)\n invalid = torch.cat(invalid_all, dim=eval_batch_dim)\n sigmas = torch.cat(sigmas_all, dim=eval_batch_dim)\n\n if predict_segmentation:\n segs = torch.cat(segs_all, dim=eval_batch_dim)\n segs = segs.reshape(B, K, -1) # (B, K, n_classes)\n\n rgbs = rgbs.reshape(B, K, -1) # (B, K, 4 or 5)\n invalid = invalid.reshape(B, K, -1)\n sigmas = sigmas.reshape(B, K)\n\n if self.training and self.noise_std > 0.0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas)) # (B, K) (delta should be positive anyways)\n\n if self.hard_alpha_cap:\n alphas[:, -1] = 1\n\n deltas = None\n sigmas = None\n alphas_shifted = 
torch.cat(\n [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1\n ) # (B, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (B)\n weights = alphas * T[:, :-1] # (B, K)\n # alphas = None\n alphas_shifted = None\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (B, 3)\n depth_final = torch.sum(weights * z_samp, -1) # (B)\n\n\n\n if self.white_bkgd:\n # White background\n pix_alpha = weights.sum(dim=1) # (B), pixel alpha\n rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1) # (B, 3)\n\n if predict_segmentation:\n segs_final = torch.sum(weights.unsqueeze(-1) * segs, dim=-2) # (B, n_classes)\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs,\n # segs,\n segs_final\n )\n else:\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs\n )\n\n def forward(\n self, model, rays, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, predict_segmentation=False, sample_from_dist=None):\n \"\"\"\n :model nerf model, should return (SB, B, (r, g, b, sigma))\n when called with (SB, B, (x, y, z)), for multi-object:\n SB = 'super-batch' = size of object batch,\n B = size of per-object ray batch.\n Should also support 'coarse' boolean argument for coarse NeRF.\n :param rays ray spec [origins (3), directions (3), near (1), far (1)] (SB, B, 8)\n :param want_weights if true, returns compositing weights (SB, B, K)\n :param predict_segmentation if true, return the segmentation class distribution for each pixel\n :return render dict\n \"\"\"\n with profiler.record_function(\"renderer_forward\"):\n if self.sched is not None and self.last_sched.item() > 0:\n self.n_coarse = self.sched[1][self.last_sched.item() - 1]\n self.n_fine = self.sched[2][self.last_sched.item() - 1]\n\n assert len(rays.shape) == 3\n superbatch_size = rays.shape[0]\n rays = rays.reshape(-1, 8) # (SB * B, 8)\n\n if sample_from_dist is None:\n z_coarse = self.sample_coarse(rays) # (B, Kc)\n else:\n prop_weights, prop_z_samp = sample_from_dist\n n_samples = prop_weights.shape[-1]\n prop_weights = prop_weights.reshape(-1, n_samples)\n prop_z_samp = prop_z_samp.reshape(-1, n_samples)\n z_coarse = self.sample_coarse_from_dist(rays, prop_weights, prop_z_samp)\n z_coarse, _ = torch.sort(z_coarse, dim=-1)\n\n coarse_composite = self.composite(\n model, rays, z_coarse, coarse=True, sb=superbatch_size, predict_segmentation=predict_segmentation\n )\n\n outputs = DotMap(\n coarse=self._format_outputs(\n coarse_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas,\n want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps, want_segmentation=predict_segmentation\n ),\n )\n\n if self.using_fine:\n all_samps = [z_coarse]\n if self.n_fine - self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine(rays, coarse_composite[0].detach())\n ) # (B, Kf - Kfd)\n if self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine_depth(rays, coarse_composite[2])\n ) # (B, Kfd)\n z_combine = torch.cat(all_samps, dim=-1) # (B, Kc + Kf)\n z_combine_sorted, argsort = torch.sort(z_combine, dim=-1)\n fine_composite = self.composite(\n model, rays, z_combine_sorted, coarse=False, sb=superbatch_size,\n )\n outputs.fine = self._format_outputs(\n fine_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas, want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps\n )\n\n return outputs\n\n def _format_outputs(\n self, rendered_outputs, superbatch_size, want_weights=False, want_alphas=False, 
want_z_samps=False, want_rgb_samps=False, want_segmentation=False\n ):\n if want_segmentation:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps, segs_final = rendered_outputs\n else:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps = rendered_outputs\n\n n_smps = weights.shape[-1]\n out_d_rgb = rgb_final.shape[-1]\n out_d_i = invalid.shape[-1]\n\n if superbatch_size > 0:\n rgb_final = rgb_final.reshape(superbatch_size, -1, out_d_rgb)\n depth = depth.reshape(superbatch_size, -1)\n weights = weights.reshape(superbatch_size, -1, n_smps)\n alphas = alphas.reshape(superbatch_size, -1, n_smps)\n invalid = invalid.reshape(superbatch_size, -1, n_smps, out_d_i)\n z_samps = z_samps.reshape(superbatch_size, -1, n_smps)\n rgb_samps = rgb_samps.reshape(superbatch_size, -1, n_smps, out_d_rgb)\n\n if want_segmentation:\n out_segs = segs_final.shape[-1]\n segs_final = segs_final.reshape(superbatch_size, -1, out_segs)\n\n ret_dict = DotMap(rgb=rgb_final, depth=depth, invalid=invalid)\n if want_weights:\n ret_dict.weights = weights\n if want_alphas:\n ret_dict.alphas = alphas\n if want_z_samps:\n ret_dict.z_samps = z_samps\n if want_rgb_samps:\n ret_dict.rgb_samps = rgb_samps\n if want_segmentation:\n ret_dict.segs = segs_final\n # ret_dict.segs_raw = segs_raw\n return ret_dict\n\n def sched_step(self, steps=1):\n \"\"\"\n Called each training iteration to update sample numbers\n according to schedule\n \"\"\"\n if self.sched is None:\n return\n self.iter_idx += steps\n while (\n self.last_sched.item() < len(self.sched[0])\n and self.iter_idx.item() >= self.sched[0][self.last_sched.item()]\n ):\n self.n_coarse = self.sched[1][self.last_sched.item()]\n self.n_fine = self.sched[2][self.last_sched.item()]\n print(\n \"INFO: NeRF sampling resolution changed on schedule ==> c\",\n self.n_coarse,\n \"f\",\n self.n_fine,\n )\n self.last_sched += 1\n\n @classmethod\n def from_conf(cls, conf, white_bkgd=False, eval_batch_size=100000):\n return cls(\n conf.get(\"n_coarse\", 128),\n conf.get(\"n_fine\", 0),\n n_fine_depth=conf.get(\"n_fine_depth\", 0),\n noise_std=conf.get(\"noise_std\", 0.0),\n depth_std=conf.get(\"depth_std\", 0.01),\n white_bkgd=conf.get(\"white_bkgd\", white_bkgd),\n lindisp=conf.get(\"lindisp\", True),\n eval_batch_size=conf.get(\"eval_batch_size\", eval_batch_size),\n sched=conf.get(\"sched\", None),\n hard_alpha_cap=conf.get(\"hard_alpha_cap\", False)\n )\n\n def bind_parallel(self, net, gpus=None, simple_output=False):\n \"\"\"\n Returns a wrapper module compatible with DataParallel.\n Specifically, it renders rays with this renderer\n but always using the given network instance.\n Specify a list of GPU ids in 'gpus' to apply DataParallel automatically.\n :param net A PixelNeRF network\n :param gpus list of GPU ids to parallize to. If length is 1,\n does not parallelize\n :param simple_output only returns rendered (rgb, depth) instead of the \n full render output map. 
Saves data tranfer cost.\n :return torch module\n \"\"\"\n wrapped = _RenderWrapper(net, self, simple_output=simple_output)\n if gpus is not None and len(gpus) > 1:\n print(\"Using multi-GPU\", gpus)\n wrapped = torch.nn.DataParallel(wrapped, gpus, dim=1)\n return wrapped" }, { "identifier": "ReconstructionLoss", "path": "models/bts/model/loss.py", "snippet": "class ReconstructionLoss:\n def __init__(self, config, use_automasking=False) -> None:\n super().__init__()\n self.criterion_str = config.get(\"criterion\", \"l2\")\n if self.criterion_str == \"l2\":\n self.rgb_coarse_crit = torch.nn.MSELoss(reduction=\"none\")\n self.rgb_fine_crit = torch.nn.MSELoss(reduction=\"none\")\n elif self.criterion_str == \"l1\":\n self.rgb_coarse_crit = torch.nn.L1Loss(reduction=\"none\")\n self.rgb_fine_crit = torch.nn.L1Loss(reduction=\"none\")\n elif self.criterion_str == \"l1+ssim\":\n self.rgb_coarse_crit = compute_errors_l1ssim\n self.rgb_fine_crit = compute_errors_l1ssim\n self.invalid_policy = config.get(\"invalid_policy\", \"strict\")\n assert self.invalid_policy in [\"strict\", \"weight_guided\", \"weight_guided_diverse\", None, \"none\"]\n self.ignore_invalid = self.invalid_policy is not None and self.invalid_policy != \"none\"\n self.lambda_coarse = config.get(\"lambda_coarse\", 1)\n self.lambda_fine = config.get(\"lambda_fine\", 1)\n self.lambda_segmentation = config.get(\"lambda_segmentation\", 1)\n self.segmentation_class_weights = config.get(\"segmentation_class_weights\", None)\n\n if self.segmentation_class_weights is not None:\n self.segmentation_class_weights = torch.tensor(list(config.get(\"segmentation_class_weights\", None).values()))\n\n self.use_automasking = use_automasking\n\n self.lambda_entropy = config.get(\"lambda_entropy\", 0)\n self.lambda_density_entropy = config.get(\"lambda_density_entropy\", 0)\n self.lambda_depth_reg = config.get(\"lambda_depth_reg\", 0)\n self.lambda_alpha_reg = config.get(\"lambda_alpha_reg\", 0)\n self.lambda_surfaceness_reg = config.get(\"lambda_surfaceness_reg\", 0)\n self.lambda_edge_aware_smoothness = config.get(\"lambda_edge_aware_smoothness\", 0)\n self.lambda_depth_smoothness = config.get(\"lambda_depth_smoothness\", 0)\n\n self.median_thresholding = config.get(\"median_thresholding\", False)\n\n self.alpha_reg_reduction = config.get(\"alpha_reg_reduction\", \"ray\")\n self.alpha_reg_fraction = config.get(\"alpha_reg_fraction\", 1/8)\n\n if self.alpha_reg_reduction not in (\"ray\", \"slice\"):\n raise ValueError(f\"Unknown reduction for alpha regularization: {self.alpha_reg_reduction}\")\n\n @staticmethod\n def get_loss_metric_names():\n return [\"loss\", \"loss_rgb_coarse\", \"loss_rgb_fine\", \"loss_ray_entropy\", \"loss_depth_reg\"]\n\n def __call__(self, data):\n with profiler.record_function(\"loss_computation\"):\n n_scales = len(data[\"coarse\"])\n\n loss_dict = {}\n\n loss_coarse_all = 0\n loss_fine_all = 0\n loss_segmentation = 0\n loss = 0\n\n coarse_0 = data[\"coarse\"][0]\n fine_0 = data[\"fine\"][0]\n segmentation_0 = data[\"segmentation\"][0]\n invalid_coarse = coarse_0[\"invalid\"]\n invalid_fine = fine_0[\"invalid\"]\n invalid_segmentation = segmentation_0[\"invalid\"]\n\n weights_coarse = coarse_0[\"weights\"]\n weights_fine = fine_0[\"weights\"]\n weights_segmentation = segmentation_0[\"weights\"]\n\n if self.invalid_policy == \"strict\":\n # Consider all rays invalid where there is at least one invalidly sampled color\n invalid_coarse = torch.all(torch.any(invalid_coarse > .5, dim=-2), dim=-1).unsqueeze(-1)\n invalid_fine = 
torch.all(torch.any(invalid_fine > .5, dim=-2), dim=-1).unsqueeze(-1)\n invalid_segmentation = torch.all(torch.any(invalid_segmentation > .5, dim=-2), dim=-1).unsqueeze(-1)\n elif self.invalid_policy == \"weight_guided\":\n # Integrate invalid indicator function over the weights. It is invalid if > 90% of the mass is invalid. (Arbitrary threshold)\n invalid_coarse = torch.all((invalid_coarse.to(torch.float32) * weights_coarse.unsqueeze(-1)).sum(-2) > .9, dim=-1, keepdim=True)\n invalid_fine = torch.all((invalid_fine.to(torch.float32) * weights_fine.unsqueeze(-1)).sum(-2) > .9, dim=-1, keepdim=True)\n invalid_segmentation = torch.all((invalid_segmentation.to(torch.float32) * weights_segmentation.unsqueeze(-1)).sum(-2) > .9,\n dim=-1, keepdim=True)\n elif self.invalid_policy == \"weight_guided_diverse\":\n # We now also consider, whether there is enough variance in the ray colors to give a meaningful supervision signal.\n rgb_samps_c = coarse_0[\"rgb_samps\"]\n rgb_samps_f = fine_0[\"rgb_samps\"]\n ray_std_c = torch.std(rgb_samps_c, dim=-3).mean(-1)\n ray_std_f = torch.std(rgb_samps_f, dim=-3).mean(-1)\n\n # Integrate invalid indicator function over the weights. It is invalid if > 90% of the mass is invalid. (Arbitrary threshold)\n invalid_coarse = torch.all(((invalid_coarse.to(torch.float32) * weights_coarse.unsqueeze(-1)).sum(-2) > .9) | (ray_std_c < 0.01), dim=-1, keepdim=True)\n invalid_fine = torch.all(((invalid_fine.to(torch.float32) * weights_fine.unsqueeze(-1)).sum(-2) > .9) | (ray_std_f < 0.01), dim=-1, keepdim=True)\n\n # for now we just do the weight guided invalids for the segmentation\n invalid_segmentation = torch.all(\n (invalid_segmentation.to(torch.float32) * weights_segmentation.unsqueeze(-1)).sum(-2) > .9,\n dim=-1, keepdim=True)\n elif self.invalid_policy == \"none\":\n invalid_coarse = torch.zeros_like(torch.all(torch.any(invalid_coarse > .5, dim=-2), dim=-1).unsqueeze(-1), dtype=torch.bool)\n invalid_fine = torch.zeros_like(torch.all(torch.any(invalid_fine > .5, dim=-2), dim=-1).unsqueeze(-1), dtype=torch.bool)\n invalid_segmentation = torch.zeros_like(torch.all(torch.any(invalid_segmentation > .5, dim=-2), dim=-1).unsqueeze(-1),\n dtype=torch.bool)\n else:\n raise NotImplementedError\n\n loss_depth_reg = torch.tensor(0.0, device=invalid_fine.device)\n loss_alpha_reg = torch.tensor(0.0, device=invalid_fine.device)\n loss_surfaceness_reg = torch.tensor(0.0, device=invalid_fine.device)\n loss_eas = torch.tensor(0.0, device=invalid_fine.device)\n loss_depth_smoothness = torch.tensor(0.0, device=invalid_fine.device)\n\n for scale in range(n_scales):\n coarse = data[\"coarse\"][scale]\n fine = data[\"fine\"][scale]\n segmentation = data[\"segmentation\"][scale]\n\n rgb_coarse = coarse[\"rgb\"]\n rgb_fine = fine[\"rgb\"]\n rgb_gt = data[\"rgb_gt\"]\n segmentation_gt = data[\"segmentation_gt\"].permute(0, 4, 1, 2, 3).squeeze(1) #(batch_size, n_patches, h, w)\n bs, n_patch, ph, pw, n_classes = segmentation[\"segs\"].shape\n segmentation_gt = segmentation_gt.view(-1, ph, pw)\n\n # do cross entropy loss\n self.segmentation_class_weights = self.segmentation_class_weights.to(segmentation_gt.device).float()\n cp_loss_fn = torch.nn.NLLLoss(weight=self.segmentation_class_weights)\n # log_segmentation = torch.log(segmentation[\"segs\"] + 1e-5).permute(0, 4, 1, 2, 3) #(batch_size, n_classes, n_patches, h, w)\n patch_to_image = data[\"patch_to_image\"]\n front_indices = patch_to_image <= 4\n side_indices = patch_to_image > 4\n\n log_segmentation = torch.log(segmentation[\"segs\"] + 
1e-5).reshape(-1, ph, pw, n_classes).permute(0, 3, 1, 2)\n\n # Account for the invalids\n # TODO: Adjust the mean so that we don't have a low loss just because we have a lot of invalids\n invalid_segmentation = invalid_segmentation.squeeze(-1).to(torch.float32).reshape(-1, ph, pw)\n\n cp_loss = cp_loss_fn(\n ((1 - invalid_segmentation.contiguous()).unsqueeze(1) * log_segmentation.contiguous()).float(),\n ((1 - invalid_segmentation.contiguous()) * segmentation_gt.contiguous()).long())\n\n loss_segmentation += self.lambda_segmentation * cp_loss.item()\n\n loss += self.lambda_segmentation * cp_loss\n\n if self.use_automasking:\n thresh_gt = rgb_gt[..., -1:]\n rgb_coarse = rgb_coarse[..., :-1]\n rgb_fine = rgb_fine[..., :-1]\n rgb_gt = rgb_gt[..., :-1]\n\n rgb_coarse = rgb_coarse\n rgb_fine = rgb_fine\n rgb_gt = rgb_gt.unsqueeze(-2)\n\n using_fine = len(fine) > 0\n\n b, pc, h, w, nv, c = rgb_coarse.shape\n\n # Take minimum across all reconstructed views\n rgb_loss = self.rgb_coarse_crit(rgb_coarse, rgb_gt)\n rgb_loss = rgb_loss.amin(-2)\n\n if self.use_automasking:\n rgb_loss = torch.min(rgb_loss, thresh_gt)\n\n if self.ignore_invalid:\n rgb_loss = rgb_loss * (1 - invalid_coarse.to(torch.float32))\n\n if self.median_thresholding:\n threshold = torch.median(rgb_loss.view(b, -1), dim=-1)[0].view(-1, 1, 1, 1, 1)\n rgb_loss = rgb_loss[rgb_loss <= threshold]\n\n rgb_loss = rgb_loss.mean()\n\n loss_coarse_all += rgb_loss.item() * self.lambda_coarse\n if using_fine:\n fine_loss = self.rgb_fine_crit(rgb_fine, rgb_gt)\n fine_loss = fine_loss.amin(-2)\n\n if self.use_automasking:\n fine_loss = torch.min(fine_loss, thresh_gt)\n\n if self.ignore_invalid:\n fine_loss = fine_loss * (1 - invalid_fine.to(torch.float32))\n\n if self.median_thresholding:\n threshold = torch.median(fine_loss.view(b, -1), dim=-1)[0].view(-1, 1, 1, 1, 1)\n fine_loss = fine_loss[fine_loss <= threshold]\n\n fine_loss = fine_loss.mean()\n rgb_loss = rgb_loss * self.lambda_coarse + fine_loss * self.lambda_fine\n loss_fine_all += fine_loss.item() * self.lambda_fine\n else:\n loss_dict[\"loss_rgb_fine\"] = 0\n\n loss += rgb_loss\n\n if self.lambda_depth_reg > 0:\n depths = coarse[\"depth\"]\n diffs_x = depths[:, :, 1:, :] - depths[:, :, :-1, :]\n diffs_y = depths[:, :, :, 1:] - depths[:, :, :, :-1]\n loss_depth_reg_s = (diffs_x ** 2).mean() + (diffs_y ** 2).mean()\n loss_depth_reg += loss_depth_reg_s # * self.lambda_depth_reg\n loss += loss_depth_reg_s * self.lambda_depth_reg\n\n if self.lambda_alpha_reg > 0:\n alphas = coarse[\"alphas\"]\n n_smps = alphas.shape[-1]\n\n # alphas = alphas[..., :-1].sum(-1)\n # loss_alpha_reg_s = (alphas - (n_smps * self.alpha_reg_fraction)).clamp_min(0)\n # if self.ignore_invalid:\n # loss_alpha_reg_s = loss_alpha_reg_s * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n\n alpha_sum = alphas[..., :-1].sum(-1)\n min_cap = torch.ones_like(alpha_sum) * (n_smps * self.alpha_reg_fraction)\n\n if self.ignore_invalid:\n alpha_sum = alpha_sum * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n min_cap = min_cap * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n\n if self.alpha_reg_reduction == \"ray\":\n loss_alpha_reg_s = (alpha_sum - min_cap).clamp_min(0)\n elif self.alpha_reg_reduction == \"slice\":\n loss_alpha_reg_s = (alpha_sum.sum(dim=-1) - min_cap.sum(dim=-1)).clamp_min(0) / alpha_sum.shape[-1]\n\n # alphas = alphas[..., :-n_smps//16]\n # alpha_deltas = alphas[..., 1:] - alphas[..., :-1]\n # The sum of deltas should be zero. 
This means that the number of peaks (ie objects) is not limited, but there needs to be free space afterwards again.\n # We don't consider the last 1/16 samples. They are likely background.\n # loss_alpha_reg_s = alpha_deltas.sum(-1).clamp_min(0)\n\n loss_alpha_reg_s = loss_alpha_reg_s.mean()\n\n loss_alpha_reg += loss_alpha_reg_s\n loss += loss_alpha_reg_s * self.lambda_alpha_reg\n\n if self.lambda_surfaceness_reg > 0:\n alphas = coarse[\"alphas\"]\n n_smps = alphas.shape[-1]\n\n p = -torch.log(torch.exp(-alphas.abs()) + torch.exp(-(1 - alphas).abs()))\n p = p.mean(-1)\n\n if self.ignore_invalid:\n p = p * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n\n loss_surfaceness_reg_s = p.mean()\n\n loss_surfaceness_reg += loss_surfaceness_reg_s\n loss += loss_surfaceness_reg_s * self.lambda_surfaceness_reg\n\n if self.lambda_edge_aware_smoothness > 0:\n gt_img = rgb_gt\n depths = coarse[\"depth\"]\n loss_eas_s = edge_aware_smoothness(gt_img, depths)\n\n if self.ignore_invalid:\n invalid_scale = torch.ceil(F.interpolate(invalid_coarse.squeeze(-1).to(torch.float32), size=(depths.shape[-2:])))\n loss_eas_s = loss_eas_s * (1 - invalid_scale)\n\n loss_eas_s = loss_eas_s.mean()\n\n loss_eas += loss_eas_s\n loss += loss_eas_s * self.lambda_edge_aware_smoothness / (2 ** scale)\n\n if self.lambda_depth_smoothness > 0:\n depths = coarse[\"depth\"]\n loss_depth_smoothness_s = ((depths[..., :-1, :] - depths[..., 1:, :]) ** 2).mean() + ((depths[..., :, :-1] - depths[..., :, 1:]) ** 2).mean()\n\n loss_depth_smoothness += loss_depth_smoothness_s\n loss += loss_depth_smoothness_s * self.lambda_depth_smoothness\n\n\n loss = loss / n_scales\n\n loss_ray_entropy = torch.tensor(0.0, device=loss.device)\n if self.lambda_entropy > 0:\n alphas = coarse_0[\"alphas\"]\n alphas = alphas + 1e-5\n\n ray_density = alphas / alphas.sum(dim=-1, keepdim=True)\n ray_entropy = -(ray_density * torch.log(ray_density)).sum(-1) / (math.log2(alphas.shape[-1]))\n ray_entropy = ray_entropy * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n loss_ray_entropy = ray_entropy.mean()\n\n loss = loss + loss_ray_entropy * self.lambda_entropy\n\n # add density entropy loss\n loss_density_entropy = torch.tensor(0.0, device=loss.device)\n\n if self.lambda_density_entropy > 0:\n alphas = coarse_0[\"alphas\"]\n alphas = alphas + 1e-5\n density_entropy = (1 - alphas)*alphas\n loss_density_entropy = torch.mean(density_entropy) * self.lambda_density_entropy\n\n loss = loss + loss_density_entropy\n\n loss_dict[\"loss_rgb_coarse\"] = loss_coarse_all\n loss_dict[\"loss_rgb_fine\"] = loss_fine_all\n loss_dict[\"loss_segmentation\"] = loss_segmentation\n loss_dict[\"loss_ray_entropy\"] = loss_ray_entropy.item()\n loss_dict[\"loss_density_entropy\"] = loss_density_entropy.item()\n loss_dict[\"loss_depth_reg\"] = loss_depth_reg.item()\n loss_dict[\"loss_alpha_reg\"] = loss_alpha_reg.item()\n loss_dict[\"loss_eas\"] = loss_eas.item()\n loss_dict[\"loss_depth_smoothness\"] = loss_depth_smoothness.item()\n loss_dict[\"loss_invalid_ratio\"] = invalid_coarse.float().mean().item()\n loss_dict[\"loss\"] = loss.item()\n\n return loss, loss_dict" }, { "identifier": "get_metrics", "path": "models/bts/trainer.py", "snippet": "class BTSWrapper(nn.Module):\n def __init__(self, renderer, config, eval_nvs=False) -> None:\n def get_loss_metric_names():\n def forward(self, data):\n def compute_segmentation_metrics(self, data):\n def compute_depth_metrics(self, data):\n def compute_nvs_metrics(self, data):\ndef training(local_rank, config):\ndef get_dataflow(config, 
logger=None):\ndef get_metrics(config, device):\ndef initialize(config: dict, logger=None):\ndef visualize(engine: Engine, logger: TensorboardLogger, step: int, tag: str):" }, { "identifier": "render_profile", "path": "scripts/inference_setup.py", "snippet": "def render_profile(net, cam_incl_adjust):\n \"\"\"Note: For this to work you have to encode the image with the net first!!!\"\"\"\n q_pts = get_pts(OUT_RES.X_RANGE, OUT_RES.Y_RANGE, OUT_RES.Z_RANGE, OUT_RES.P_RES_ZX[1], OUT_RES.P_RES_Y, OUT_RES.P_RES_ZX[0], cam_incl_adjust=cam_incl_adjust)\n q_pts = q_pts.to(device).view(1, -1, 3)\n\n batch_size = 50000\n if q_pts.shape[1] > batch_size:\n sigmas = []\n invalid = []\n l = q_pts.shape[1]\n for i in range(math.ceil(l / batch_size)):\n f = i * batch_size\n t = min((i + 1) * batch_size, l)\n q_pts_ = q_pts[:, f:t, :]\n _, invalid_, sigmas_ = net.forward(q_pts_)\n sigmas.append(sigmas_)\n invalid.append(invalid_)\n sigmas = torch.cat(sigmas, dim=1)\n invalid = torch.cat(invalid, dim=1)\n else:\n _, invalid, sigmas = net.forward(q_pts)\n\n sigmas[torch.any(invalid, dim=-1)] = 1\n alphas = sigmas\n\n alphas = alphas.reshape(OUT_RES.P_RES_Y, *OUT_RES.P_RES_ZX)\n\n alphas_sum = torch.cumsum(alphas, dim=0)\n profile = (alphas_sum <= 8).float().sum(dim=0) / alphas.shape[0]\n return profile" }, { "identifier": "map_fn", "path": "utils/array_operations.py", "snippet": "def map_fn(batch, fn):\ndef to(data, device, non_blocking=True):\ndef set_requires_grad(nets, requires_grad=False):\ndef mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None, keepdim=False):\ndef apply_crop(array, crop):\ndef shrink_mask(mask, shrink=3):\ndef get_mask(size, border=5, device=None):\ndef get_grid(H, W, normalize=True):\ndef detach(t):" }, { "identifier": "base_training", "path": "utils/base_trainer.py", "snippet": "def base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize):\n\n # copy the segmentation mode to the data and model_conf part of the config\n config['data']['segmentation_mode'] = config.get(\"segmentation_mode\", None)\n config['model_conf']['segmentation_mode'] = config.get(\"segmentation_mode\", None)\n\n rank = idist.get_rank()\n manual_seed(config[\"seed\"] + rank)\n device = idist.device()\n\n logger = setup_logger(name=config[\"name\"])\n\n log_basic_info(logger, config)\n\n output_path = config[\"output_path\"]\n if rank == 0:\n if config[\"stop_iteration\"] is None:\n now = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n else:\n now = f\"stop-on-{config['stop_iteration']}\"\n\n folder_name = f\"{config['name']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}\"\n output_path = Path(output_path) / folder_name\n if not output_path.exists():\n output_path.mkdir(parents=True)\n config[\"output_path\"] = output_path.as_posix()\n logger.info(f\"Output path: {config['output_path']}\")\n\n if \"cuda\" in device.type:\n config[\"cuda device name\"] = torch.cuda.get_device_name(local_rank)\n\n # Setup dataflow, model, optimizer, criterion\n loaders = get_dataflow(config, logger)\n if len(loaders) == 2:\n train_loader, test_loader = loaders\n vis_loader = None\n else:\n train_loader, test_loader, vis_loader = loaders\n\n if hasattr(train_loader, \"dataset\"):\n logger.info(f\"Dataset length: Train: {len(train_loader.dataset)}, Test: {len(test_loader.dataset)}\")\n\n config[\"num_iters_per_epoch\"] = len(train_loader)\n model, optimizer, criterion, lr_scheduler = initialize(config, logger)\n\n logger.info(f\"Model parameters: {sum(p.numel() for p in model.parameters())}\")\n\n # 
Let's now setup evaluator engine to perform model's validation and compute metrics\n metrics = get_metrics(config, device)\n metrics_loss = {k: MeanMetric((lambda y: lambda x: x[\"loss_dict\"][y])(k)) for k in criterion.get_loss_metric_names()}\n\n loss_during_validation = config.get(\"loss_during_validation\", True)\n if loss_during_validation:\n eval_metrics = {**metrics, **metrics_loss}\n else:\n eval_metrics = metrics\n\n # Create trainer for current task\n trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler if hasattr(train_loader, \"sampler\") else None, config, logger, metrics={})\n\n # We define two evaluators as they wont have exactly similar roles:\n # - `evaluator` will save the best model based on validation score\n evaluator = create_evaluator(model, metrics=eval_metrics, criterion=criterion if loss_during_validation else None, config=config)\n\n if vis_loader is not None:\n visualizer = create_evaluator(model, metrics=eval_metrics, criterion=criterion if loss_during_validation else None, config=config)\n else:\n visualizer = None\n\n def run_validation(engine):\n epoch = trainer.state.epoch\n state = evaluator.run(test_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Test\", state.metrics)\n\n def run_visualization(engine):\n epoch = trainer.state.epoch\n state = visualizer.run(vis_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Vis\", state.metrics)\n\n eval_use_iters = config.get(\"eval_use_iters\", False)\n vis_use_iters = config.get(\"vis_use_iters\", False)\n\n if not eval_use_iters:\n trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config[\"validate_every\"]) | Events.COMPLETED, run_validation)\n else:\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=config[\"validate_every\"]) | Events.COMPLETED, run_validation)\n\n if visualizer:\n if not vis_use_iters:\n trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config[\"visualize_every\"]) | Events.COMPLETED, run_visualization)\n else:\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=config[\"visualize_every\"]) | Events.COMPLETED, run_visualization)\n\n if rank == 0:\n # Setup TensorBoard logging on trainer and evaluators. Logged values are:\n # - Training metrics, e.g. 
running average loss values\n # - Learning rate\n # - Evaluation train/test metrics\n\n trainer_timer = IterationTimeHandler()\n trainer_timer_data = DataloaderTimeHandler()\n trainer.add_event_handler(Events.ITERATION_STARTED, trainer_timer.start_iteration)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, trainer_timer.end_iteration)\n trainer.add_event_handler(Events.GET_BATCH_STARTED, trainer_timer_data.start_get_batch)\n trainer.add_event_handler(Events.GET_BATCH_COMPLETED, trainer_timer_data.end_get_batch)\n\n evaluator_timer = IterationTimeHandler()\n evaluator_timer_data = DataloaderTimeHandler()\n evaluator.add_event_handler(Events.ITERATION_STARTED, evaluator_timer.start_iteration)\n evaluator.add_event_handler(Events.ITERATION_COMPLETED, evaluator_timer.end_iteration)\n evaluator.add_event_handler(Events.GET_BATCH_STARTED, evaluator_timer_data.start_get_batch)\n evaluator.add_event_handler(Events.GET_BATCH_COMPLETED, evaluator_timer_data.end_get_batch)\n\n if visualizer:\n visualizer_timer = IterationTimeHandler()\n visualizer_timer_data = DataloaderTimeHandler()\n visualizer.add_event_handler(Events.ITERATION_STARTED, visualizer_timer.start_iteration)\n visualizer.add_event_handler(Events.ITERATION_COMPLETED, visualizer_timer.end_iteration)\n visualizer.add_event_handler(Events.GET_BATCH_STARTED, visualizer_timer_data.start_get_batch)\n visualizer.add_event_handler(Events.GET_BATCH_COMPLETED, visualizer_timer_data.end_get_batch)\n\n gst = lambda engine, event_name: trainer.state.epoch\n gst_it_epoch = lambda engine, event_name: (trainer.state.epoch - 1) * engine.state.epoch_length + engine.state.iteration - 1\n eval_gst_it_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"validate_every\"]) * engine.state.epoch_length + engine.state.iteration - 1\n vis_gst_it_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"visualize_every\"]) * engine.state.epoch_length + engine.state.iteration - 1\n\n eval_gst_ep_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"validate_every\"])\n vis_gst_ep_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"visualize_every\"])\n\n eval_gst_it = eval_gst_it_iters if eval_use_iters else gst_it_epoch\n vis_gst_it = vis_gst_it_iters if vis_use_iters else gst_it_epoch\n\n eval_gst_ep = eval_gst_ep_iters if eval_use_iters else gst\n vis_gst_ep = vis_gst_ep_iters if vis_use_iters else gst\n\n tb_logger = TensorboardLogger(log_dir=output_path)\n tb_logger.attach(trainer, MetricLoggingHandler(\"train\", optimizer), Events.ITERATION_COMPLETED(every=config.get(\"log_every_iters\", 1)))\n tb_logger.attach(evaluator, MetricLoggingHandler(\"val\", log_loss=False, global_step_transform=eval_gst_ep), Events.EPOCH_COMPLETED)\n if visualizer:\n tb_logger.attach(visualizer, MetricLoggingHandler(\"vis\", log_loss=False, global_step_transform=vis_gst_ep), Events.EPOCH_COMPLETED)\n\n # Plot config to tensorboard\n config_json = json.dumps(OmegaConf.to_container(config, resolve=True), indent=2)\n config_json = \"\".join(\"\\t\" + line for line in config_json.splitlines(True))\n tb_logger.writer.add_text(\"config\", text_string=config_json, global_step=0)\n\n if visualize is not None:\n train_log_interval = config.get(\"log_tb_train_every_iters\", -1)\n 
val_log_interval = config.get(\"log_tb_val_every_iters\", train_log_interval)\n vis_log_interval = config.get(\"log_tb_vis_every_iters\", 1)\n\n if train_log_interval > 0:\n tb_logger.attach(\n trainer,\n VisualizationHandler(tag=\"training\", visualizer=visualize),\n Events.ITERATION_COMPLETED(every=train_log_interval))\n if val_log_interval > 0:\n tb_logger.attach(\n evaluator,\n VisualizationHandler(tag=\"val\", visualizer=visualize, global_step_transform=eval_gst_it),\n Events.ITERATION_COMPLETED(every=val_log_interval))\n if visualizer and vis_log_interval > 0:\n tb_logger.attach(\n visualizer,\n VisualizationHandler(tag=\"vis\", visualizer=visualize, global_step_transform=vis_gst_it),\n Events.ITERATION_COMPLETED(every=vis_log_interval))\n\n if \"save_best\" in config:\n # Store the 2 best models by validation score, starting from num_epochs / 2:\n save_best_config = config[\"save_best\"]\n metric_name = save_best_config[\"metric\"]\n sign = save_best_config.get(\"sign\", 1.0)\n\n best_model_handler = Checkpoint(\n {\"model\": model},\n get_save_handler(config),\n filename_prefix=\"best\",\n n_saved=2,\n global_step_transform=global_step_from_engine(trainer),\n score_name=metric_name,\n score_function=Checkpoint.get_default_score_fn(metric_name, score_sign=sign),\n )\n evaluator.add_event_handler(\n Events.COMPLETED(lambda *_: trainer.state.epoch > config[\"num_epochs\"] // 2), best_model_handler\n )\n\n # To allow checking that training resumes correctly, we can stop training at a given iteration\n if config[\"stop_iteration\"] is not None:\n\n @trainer.on(Events.ITERATION_STARTED(once=config[\"stop_iteration\"]))\n def _():\n logger.info(f\"Stopping training at iteration {trainer.state.iteration}\")\n trainer.terminate()\n\n try:\n trainer.run(train_loader, max_epochs=config[\"num_epochs\"])\n except Exception as e:\n logger.exception(\"\")\n raise e\n\n if rank == 0:\n tb_logger.close()
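For orientation, the snippet above follows the standard pytorch-ignite pattern of one training Engine plus separate evaluation Engines whose runs are scheduled through event handlers. Below is a minimal, self-contained sketch of that pattern; the toy model, data, and intervals are illustrative stand-ins, not part of the repository code.

import torch
from ignite.engine import Engine, Events

# Toy stand-ins for the model and loaders built earlier in the script.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
train_data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(10)]
val_data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(2)]

def train_step(engine, batch):
    model.train()
    optimizer.zero_grad()
    x, y = batch
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()
    return loss.item()

def eval_step(engine, batch):
    model.eval()
    with torch.no_grad():
        x, y = batch
        return torch.nn.functional.mse_loss(model(x), y).item()

trainer = Engine(train_step)
evaluator = Engine(eval_step)

# Same scheduling idea as above: validate every other epoch and once at the end.
@trainer.on(Events.EPOCH_COMPLETED(every=2) | Events.COMPLETED)
def run_validation(engine):
    state = evaluator.run(val_data)
    print(f"epoch {engine.state.epoch}: val loss on last batch {state.output:.4f}")

trainer.run(train_data, max_epochs=4)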
import math import ignite.distributed as idist import torch import numpy as np from copy import copy from typing import Optional, Union, Iterable, Sequence from ignite.contrib.handlers import TensorboardLogger from ignite.engine import Engine from matplotlib import pyplot as plt from torch import optim, nn from torch.utils.data import DataLoader, Dataset, Sampler from torch.utils.data.dataloader import T_co, _collate_fn_t, _worker_init_fn_t from torchvision.utils import make_grid from datasets.data_util import make_datasets from models.common.model.scheduler import make_scheduler from models.common.render import NeRFRenderer from models.bts.model.loss import ReconstructionLoss from models.bts.trainer import get_metrics, BTSWrapper, BTSNet from scripts.inference_setup import render_profile from utils.array_operations import map_fn, unsqueezer, to from utils.base_trainer import base_training from utils.plotting import color_tensor, color_segmentation_tensor
16,170
class EncoderDummy(nn.Module): def __init__(self, size, feat_dim, num_views=1) -> None: super().__init__() self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size)) self.latent_size = feat_dim def forward(self, x): n = x.shape[0] return [self.feats.expand(n, -1, -1, -1)] class DataloaderDummy(DataLoader): def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None, num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, multiprocessing_context=None, generator=None, *, prefetch_factor: int = 2, persistent_workers: bool = False, pin_memory_device: str = ""): super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn, multiprocessing_context, generator, prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device) self.element = to(map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer), "cuda:0") def _get_iterator(self): return iter([self.element]) def __iter__(self): return super().__iter__() def __len__(self) -> int: return 1 class BTSWrapperOverfit(BTSWrapper): def __init__(self, renderer, config, eval_nvs=False, size=None) -> None: super().__init__(renderer, config, eval_nvs) self.encoder_dummy = EncoderDummy(size, config["encoder"]["d_out"], num_views=1) self.renderer.net.encoder = self.encoder_dummy self.renderer.net.flip_augmentation = False def training(local_rank, config): return base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize) def get_dataflow(config, logger=None): # - Get train/test datasets if idist.get_local_rank() > 0: # Ensure that only local rank 0 downloads the dataset # Thus each node will download a copy of the dataset idist.barrier() train_dataset, _ = make_datasets(config["data"]) train_dataset.load_kitti_360_segmentation_gt = True train_dataset.length = 1 train_dataset._skip = config["data"].get("skip", 0) vis_dataset = copy(train_dataset) test_dataset = copy(train_dataset) vis_dataset.return_depth = True test_dataset.return_depth = True if idist.get_local_rank() == 0: # Ensure that only local rank 0 downloads the dataset idist.barrier() # Set up data loaders, also adapted to the distributed config: nccl, gloo, xla-tpu train_loader = DataloaderDummy(train_dataset) test_loader = DataloaderDummy(test_dataset) vis_loader = DataloaderDummy(vis_dataset) return train_loader, test_loader, vis_loader def initialize(config: dict, logger=None): arch = config["model_conf"].get("arch", "BTSNet") net = globals()[arch](config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() mode = config.get("mode", "depth") model = BTSWrapperOverfit( renderer, config["model_conf"], mode == "nvs", size=config["data"].get("image_size", (192, 640)) ) model = idist.auto_model(model) optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"]) optimizer = idist.auto_optim(optimizer) lr_scheduler = make_scheduler(config.get("scheduler", {}), optimizer)
class EncoderDummy(nn.Module): def __init__(self, size, feat_dim, num_views=1) -> None: super().__init__() self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size)) self.latent_size = feat_dim def forward(self, x): n = x.shape[0] return [self.feats.expand(n, -1, -1, -1)] class DataloaderDummy(DataLoader): def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None, num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, multiprocessing_context=None, generator=None, *, prefetch_factor: int = 2, persistent_workers: bool = False, pin_memory_device: str = ""): super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn, multiprocessing_context, generator, prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device) self.element = to(map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer), "cuda:0") def _get_iterator(self): return iter([self.element]) def __iter__(self): return super().__iter__() def __len__(self) -> int: return 1 class BTSWrapperOverfit(BTSWrapper): def __init__(self, renderer, config, eval_nvs=False, size=None) -> None: super().__init__(renderer, config, eval_nvs) self.encoder_dummy = EncoderDummy(size, config["encoder"]["d_out"], num_views=1) self.renderer.net.encoder = self.encoder_dummy self.renderer.net.flip_augmentation = False def training(local_rank, config): return base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize) def get_dataflow(config, logger=None): # - Get train/test datasets if idist.get_local_rank() > 0: # Ensure that only local rank 0 downloads the dataset # Thus each node will download a copy of the dataset idist.barrier() train_dataset, _ = make_datasets(config["data"]) train_dataset.load_kitti_360_segmentation_gt = True train_dataset.length = 1 train_dataset._skip = config["data"].get("skip", 0) vis_dataset = copy(train_dataset) test_dataset = copy(train_dataset) vis_dataset.return_depth = True test_dataset.return_depth = True if idist.get_local_rank() == 0: # Ensure that only local rank 0 downloads the dataset idist.barrier() # Set up data loaders, also adapted to the distributed config: nccl, gloo, xla-tpu train_loader = DataloaderDummy(train_dataset) test_loader = DataloaderDummy(test_dataset) vis_loader = DataloaderDummy(vis_dataset) return train_loader, test_loader, vis_loader def initialize(config: dict, logger=None): arch = config["model_conf"].get("arch", "BTSNet") net = globals()[arch](config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() mode = config.get("mode", "depth") model = BTSWrapperOverfit( renderer, config["model_conf"], mode == "nvs", size=config["data"].get("image_size", (192, 640)) ) model = idist.auto_model(model) optimizer = optim.Adam(model.parameters(), lr=config["learning_rate"]) optimizer = idist.auto_optim(optimizer) lr_scheduler = make_scheduler(config.get("scheduler", {}), optimizer)
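The EncoderDummy above replaces the image encoder with a single learnable feature map, so that training overfits one scene directly in feature space. A small sketch of that trick, with illustrative shapes:

import torch
from torch import nn

# A learnable feature map standing in for an encoder output: (views, C, H, W).
feats = nn.Parameter(torch.randn(1, 64, 24, 80))

x = torch.zeros(3, 3, 192, 640)  # a dummy batch of 3 input images
# The forward pass ignores the image content and just expands the stored
# features to the batch size, exactly like EncoderDummy.forward above.
batch_feats = feats.expand(x.shape[0], -1, -1, -1)
print(batch_feats.shape)  # torch.Size([3, 64, 24, 80])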
criterion = ReconstructionLoss(config["loss"], config["model_conf"].get("use_automasking", False))
3
2023-11-12 21:53:27+00:00
24k
newcastleuniversity/DISPEL
dispel/processing/extract.py
[ { "identifier": "EntityType", "path": "dispel/data/core.py", "snippet": "class ReadingSchema:\nclass Evaluation(Epoch):\nclass Session(Epoch):\nclass Reading(FlagMixIn):\n def __init__(\n self,\n *args,\n uuid: str,\n finished: Optional[bool] = None,\n exit_reason: Optional[str] = None,\n user_id: Optional[str] = None,\n **kwargs,\n ):\n def to_dict(self):\n def __init__(\n self,\n *args,\n uuid: Optional[str] = None,\n evaluation_codes: Optional[Iterable[str]] = None,\n **kwargs,\n ):\n def __init__(\n self,\n evaluation: Evaluation,\n session: Optional[Session] = None,\n levels: Optional[Iterable[Level]] = None,\n measure_set: Optional[MeasureSet] = None,\n schema: Optional[ReadingSchema] = None,\n date: Any = None,\n device: Optional[Device] = None,\n ):\n def get_level(self, level_id: Optional[LevelIdType] = None) -> Level:\n def __repr__(self) -> str:\n def __iter__(self) -> Iterable[Tuple[LevelIdType, Level]]:\n def __len__(self) -> int:\n def empty(self) -> bool:\n def levels(self) -> ValuesView[Level]:\n def level_ids(self) -> List[LevelId]:\n def has_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> bool:\n def get_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> RawDataSet:\n def get_measure_set(self, level_id: Optional[LevelIdType] = None) -> MeasureSet:\n def get_merged_measure_set(self) -> MeasureSet:\n def set(self, value, **kwargs):\n def _get_level(self, level: Optional[Union[LevelIdType, Level]] = None) -> Level:\n def _measure_set(\n self,\n value: MeasureSet,\n level: Optional[Union[LevelIdType, Level]] = None,\n ):\n def _measure_value(\n self,\n value: MeasureValue,\n level: Optional[Union[LevelIdType, Level]] = None,\n epoch: Optional[LevelEpoch] = None,\n ):\n def _raw_data_set(\n self,\n value: RawDataSet,\n level: Union[LevelIdType, Level],\n concatenate: bool = False,\n overwrite: bool = False,\n ):\n def _epoch_measure_set(self, value: LevelEpoch, level: Union[LevelIdType, Level]):\n def _level(self, value: Level):\n def _set_flag(self, value: Flag):" }, { "identifier": "Flag", "path": "dispel/data/flags.py", "snippet": "class Flag:\n \"\"\"A class for entity flag.\"\"\"\n\n #: The flag identifier (string or id format)\n id_: InitVar[FlagIdType]\n\n #: The flag identifier\n id: FlagId = field(init=False)\n\n #: The detailed reason for the flag\n reason: str\n\n #: Stop processing\n stop_processing: bool = False\n\n def __post_init__(self, id_: FlagIdType):\n if isinstance(id_, str):\n self.id = FlagId.from_str(id_)\n elif isinstance(id_, FlagId):\n self.id = id_\n else:\n raise TypeError(\n \"Flag id should be either a convertible string id or an \"\n \"FlagId class.\"\n )\n\n def __hash__(self):\n return hash((self.id, self.reason, self.stop_processing))\n\n def format(self, *args, **kwargs) -> \"Flag\":\n \"\"\"Format an flag.\"\"\"\n return Flag(\n id_=self.id.format(*args, **kwargs),\n reason=self.reason.format(*args, **kwargs),\n stop_processing=self.stop_processing,\n )" }, { "identifier": "FlagSeverity", "path": "dispel/data/flags.py", "snippet": "class FlagSeverity(AVEnum):\n \"\"\"An enumeration for flag severity.\"\"\"\n\n DEVIATION = \"deviation\"\n INVALIDATION = \"invalidation\"" }, { "identifier": "FlagType", "path": "dispel/data/flags.py", "snippet": "class FlagType(AVEnum):\n \"\"\"An enumeration for flag types.\"\"\"\n\n TECHNICAL = \"technical\"\n BEHAVIORAL = \"behavioral\"" }, { "identifier": "WrappedResult", "path": "dispel/data/flags.py", "snippet": "class WrappedResult(FlagMixIn, 
Generic[WrappedResultType]):\n \"\"\"A wrapped result to carry potential flags.\n\n This class provides a convenient way to add flags to values from extract steps that\n are known to be invalid. This avoids having to write a separate flag step and is\n useful in cases where the information to flag a result is only accessible in the\n extract function.\n\n Parameters\n ----------\n measure_value\n The value of the measure returned by the extraction function.\n\n Attributes\n ----------\n measure_value\n The value of the measure returned by the extraction function.\n\n Examples\n --------\n Assuming we wanted to flag measures directly inside a custom extraction function\n based on some metrics calculated, one can do\n\n >>> from dispel.processing.extract import WrappedResult\n >>> from dispel.data.flags import Flag\n >>> from typing import Union\n >>> def custom_aggregation_func(data) -> Union[WrappedResult, float]:\n ... result = data.agg('mean')\n ... if len(data) < 3:\n ... inv = Flag(\n ... reason='Not enough data point',\n ... flag_severity=FlagSeverity.INVALIDATION\n ... )\n ... result = WrappedResult(result, inv)\n ... return result\n\n During processing, the class `ExtractStep` allows the transformation function to\n output ``WrappedResult`` objects. The extract step will automatically add any flags\n present in the ``WrappedResult`` object to the measure value. The ``WrappedResult``\n class supports basic operations with other scalars or ``WrappedResult`` object:\n\n >>> from dispel.processing.extract import WrappedResult\n >>> res1 = WrappedResult(measure_value=1)\n >>> res2 = WrappedResult(measure_value=2)\n >>> melted_res = res1 + res2\n >>> melted_res2 = res1 + 1\n \"\"\"\n\n def __init__(self, measure_value: WrappedResultType, *args, **kwargs):\n self.measure_value: WrappedResultType = measure_value\n super().__init__(*args, **kwargs)\n\n def _binary_operator(\n self,\n func: Callable[[WrappedResultType, WrappedResultType], WrappedResultType],\n other: Union[WrappedResultType, \"WrappedResult[WrappedResultType]\"],\n ) -> \"WrappedResult[WrappedResultType]\":\n \"\"\"Perform binary operation on values.\"\"\"\n # Get measure value for both WrappedResult and float object\n if is_wrapped := isinstance(other, WrappedResult):\n value_other = cast(WrappedResult, other).measure_value\n else:\n value_other = other\n\n # Create a new WrappedResult object with the combination\n res = WrappedResult(\n func(self.measure_value, value_other)\n ) # type: WrappedResult[WrappedResultType]\n\n # Inherit flag from current objet\n res.add_flags(self, ignore_duplicates=True)\n\n # If other is also wrapped, inherit his flag as well\n if is_wrapped:\n res.add_flags(cast(WrappedResult, other), True)\n\n return res\n\n def _unary_operation(\n self, func: Callable[[WrappedResultType], WrappedResultType]\n ) -> \"WrappedResult[WrappedResultType]\":\n res = WrappedResult(func(self.measure_value))\n res.add_flags(self)\n return res\n\n def __abs__(self):\n return self._unary_operation(operator.abs)\n\n def __add__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.add, other)\n\n def __sub__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.sub, other)\n\n def __mul__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.mul, other)\n\n def 
__truediv__(\n self, other: \"WrappedResult[WrappedResultType]\"\n ) -> \"WrappedResult[WrappedResultType]\":\n return self._binary_operator(operator.truediv, other)" }, { "identifier": "Level", "path": "dispel/data/levels.py", "snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n 
return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": "MeasureId", "path": "dispel/data/measures.py", "snippet": "class MeasureId(DefinitionId):\n \"\"\"The definition of a measure id for a task.\n\n Parameters\n ----------\n task_name\n The name and abbreviation of the task. Note that if no abbreviation is provided\n the name is used directly in the id.\n measure_name\n The name of the measure and its abbreviation.\n modalities\n The modalities and their abbreviations under which the measure is constituted.\n aggregation\n A method that was used to aggregate a sequence of the underlying measure,\n e.g., for the measure ``mean response time`` it would be ``mean``.\n\n Notes\n -----\n The abbreviations of values are passed using\n :class:`~dispel.data.values.AbbreviatedValue`. To generate the actual id the `.abbr`\n accessor is used. If one passes only strings, the class actually wraps those into\n ``AbbreviatedValue`` instances.\n\n Examples\n --------\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> from dispel.data.measures import MeasureId\n >>> MeasureId(\n ... task_name=AV('Cognitive Processing Speed', 'CPS'),\n ... measure_name=AV('reaction time', 'rt'),\n ... modalities=[AV('digit-to-digit', 'dtd')],\n ... aggregation='mean'\n ... 
)\n cps-dtd-rt-mean\n \"\"\"\n\n def __init__(\n self,\n task_name: Union[str, AV],\n measure_name: Union[str, AV],\n modalities: Optional[List[Union[str, AV]]] = None,\n aggregation: Optional[Union[str, AV]] = None,\n ):\n self.task_name = AV.wrap(task_name)\n self.measure_name = AV.wrap(measure_name)\n self.modalities = None\n if modalities:\n self.modalities = list(map(AV.wrap, modalities))\n self.aggregation = AV.wrap(aggregation) if aggregation else None\n\n id_ = _join_not_none(\n \"-\",\n [\n self.task_name.abbr.lower(),\n \"_\".join(map(lambda x: x.abbr.lower(), self.modalities))\n if self.modalities\n else None,\n self.measure_name.abbr.lower(),\n self.aggregation.abbr.lower() if self.aggregation else None,\n ],\n )\n\n super().__init__(id_)\n\n @classmethod\n def from_str(cls, value: str) -> DefinitionId:\n \"\"\"See :meth:`dispel.data.values.DefinitionId.from_str`.\n\n Parameters\n ----------\n value\n The string from which the definition id is to be constructed.\n\n Raises\n ------\n NotImplementedError\n Always raised. This method is not implemented since there is no unambiguous\n parsing of task ids.\n \"\"\"\n raise NotImplementedError(\"Not unambiguous parsing of ids possible\")" }, { "identifier": "MeasureSet", "path": "dispel/data/measures.py", "snippet": "class MeasureSet(ValueSet):\n \"\"\"A collection of measures.\"\"\"\n\n VALUE_CLS: ClassVar[Type[Value]] = MeasureValue\n\n @classmethod\n def from_data_frame(cls, data: pd.DataFrame) -> \"MeasureSet\":\n \"\"\"Create a MeasureSet from a data frame.\n\n Parameters\n ----------\n data\n A data frame containing information about measures\n\n Returns\n -------\n MeasureSet\n A measure set derived from the provided data frame.\n \"\"\"\n return cls(data.apply(row_to_value, axis=1).to_list())\n\n def to_list(self, stringify: bool = False) -> List[Dict[str, Optional[Any]]]:\n \"\"\"Convert measure set to a list of measure dictionaries.\n\n Parameters\n ----------\n stringify\n ``True`` if all dictionary values are converted to strings. ``False``\n otherwise.\n\n Returns\n -------\n List[Dict[str, Optional[Any]]]\n A dictionary summarizing measure value information.\n \"\"\"\n return [\n cast(self.VALUE_CLS, measure).to_dict(stringify) # type: ignore\n for measure in self.values()\n ]" }, { "identifier": "MeasureValue", "path": "dispel/data/measures.py", "snippet": "class MeasureValue(FlagMixIn, Value):\n \"\"\"A measure value.\"\"\"\n\n def __repr__(self):\n return (\n f\"<MeasureValue ({self.definition}): {self.value} \"\n f\"({self.flag_count_repr})>\"\n )\n\n @staticmethod\n def _to_string(value):\n return \"\" if value is None else str(value)\n\n def to_dict(self, stringify: bool = False) -> Dict[str, Optional[Any]]:\n \"\"\"Get a dictionary representation of measure information.\n\n Parameters\n ----------\n stringify\n ``True`` if all dictionary values are converted to strings. 
``False``\n otherwise.\n\n Returns\n -------\n Dict[str, Optional[Any]]\n A dictionary summarizing measure value information.\n \"\"\"\n measure_min, measure_max = None, None\n if isinstance(self.definition.validator, RangeValidator):\n measure_min = self.definition.validator.lower_bound\n measure_max = self.definition.validator.upper_bound\n\n if stringify:\n value = str(self.value)\n measure_min = self._to_string(measure_min)\n measure_max = self._to_string(measure_max)\n else:\n value = self.value\n\n return dict(\n measure_id=str(self.id),\n measure_name=self.definition.name,\n measure_value=value,\n measure_unit=self.definition.unit,\n measure_type=self.definition.data_type,\n measure_min=measure_min,\n measure_max=measure_max,\n )" }, { "identifier": "MeasureValueDefinition", "path": "dispel/data/measures.py", "snippet": "class MeasureValueDefinition(ValueDefinition):\n \"\"\"The definition of measures from tasks.\n\n Parameters\n ----------\n task_name\n The full name of the task and its abbreviation, e.g., ``Cognitive Processing\n Speed test`` and ``CPS`` passed using\n :class:`~dispel.data.values.AbbreviatedValue`.\n measure_name\n The name of the measure, e.g. ``reaction time`` and its abbreviation passed\n using :class:`~dispel.data.values.AbbreviatedValue`. Note that aggregation\n methods are specified in ``aggregation`` and should not be direclty part of the\n measure name.\n unit\n See :class:`~dispel.data.values.ValueDefinition`.\n description\n See :class:`~dispel.data.values.ValueDefinition`.\n data_type\n See :class:`~dispel.data.values.ValueDefinition`.\n validator\n See :class:`~dispel.data.values.ValueDefinition`.\n modalities\n The modalities of the tasks, i.e. if there is more than one variant of the task.\n An example would be the ``digit-to-digit`` and ``symbol-to-digit`` or\n ``predefined key 1``, ``predefined key 2`` and ``random key`` variants of the\n CPS test. Abbreviations of the modalities can be passed using\n :class:`~dispel.data.values.AbbreviatedValue`.\n aggregation\n If the measure is the result of an aggregation, the method that was used to\n aggregate. E.g. for ``mean response time`` it would be ``mean``. Abbreviations\n are passed using :class:`~dispel.data.values.AbbreviatedValue`.\n precision\n See :class:`~dispel.data.values.ValueDefinition`.\n\n Examples\n --------\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> from dispel.data.measures import MeasureValueDefinition\n >>> from dispel.data.validators import RangeValidator\n >>> MeasureValueDefinition(\n ... task_name = AV('Cognitive Processing Speed test', 'CPS'),\n ... measure_name = AV('response time', 'rt'),\n ... unit = 's',\n ... description = 'The mean time to respond to a presented stimulus',\n ... data_type = 'float64',\n ... validator = RangeValidator(lower_bound=0),\n ... modalities = [\n ... AV('digit-to-digit', 'dtd'),\n ... AV('predefined key 1', 'key1')\n ... ],\n ... aggregation = 'mean'\n ... 
)\n <MeasureValueDefinition: cps-dtd_key1-rt-mean (CPS digit-to-digit ...>\n \"\"\"\n\n def __init__(\n self,\n task_name: Union[str, AV],\n measure_name: Union[str, AV],\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n validator: Optional[Callable[[Any], None]] = None,\n modalities: Optional[List[Union[str, AV]]] = None,\n aggregation: Optional[Union[str, AV]] = None,\n precision: Optional[int] = None,\n ):\n self.task_name = AV.wrap(task_name)\n self.measure_name = AV.wrap(measure_name)\n self.modalities = None\n if modalities:\n self.modalities = list(map(AV.wrap, modalities))\n self.aggregation = AV.wrap(aggregation) if aggregation else None\n\n id_ = MeasureId(\n task_name=self.task_name,\n measure_name=self.measure_name,\n modalities=self.modalities,\n aggregation=aggregation,\n )\n\n name = _join_not_none(\n \" \",\n [\n self.task_name.abbr.upper(),\n \" \".join(map(str, self.modalities)) if self.modalities else None,\n self.aggregation if self.aggregation else None,\n self.measure_name,\n ],\n )\n\n super().__init__(\n id_=id_,\n name=name,\n unit=unit,\n description=description,\n data_type=data_type,\n validator=validator,\n precision=precision,\n )" }, { "identifier": "AbbreviatedValue", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\n \"\"\"An abbreviated value.\n\n Examples\n --------\n This class allows to consistently handle abbreviated terms. Assuming you have a name\n of an assessment, e.g. `Cognitive Processing Speed` test and the respective\n abbreviation would be `CPS`, then you can create an abbreviated value like this:\n\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> value = AV('Cognitive Processing Speed test', 'CPS')\n >>> value\n Cognitive Processing Speed test (CPS)\n\n While this seems like a lot of overhead, it comes in handy when describing value\n definitions or higher-level abstractions, such as measure definitions.\n\n Parameters\n ----------\n value\n The full description of the value\n abbr\n The abbreviated form of the value\n\n Attributes\n ----------\n value\n The full description of the value\n \"\"\"\n\n def __init__(self, value: str, abbr: Optional[str] = None):\n self.value = value\n self._abbr = abbr\n\n @property\n def abbr(self):\n \"\"\"Get the abbreviated form of the value.\"\"\"\n return self._abbr or self.value\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n if self._abbr:\n return f\"{self.value} ({self._abbr})\"\n return self.value\n\n def __hash__(self):\n return hash((self.value, self._abbr))\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self._abbr is None and self.value == other\n if isinstance(other, AbbreviatedValue):\n return self.value == other.value and self.abbr == other.abbr\n return False\n\n def __lt__(self, other):\n if not isinstance(other, AbbreviatedValue):\n raise ValueError(f\"Unsupported type in comparison: {type(other)}\")\n if self.value == other.value:\n return self.abbr < other.abbr\n return self.value < other.value\n\n def format(self, *args, **kwargs):\n \"\"\"Format an abbreviated value.\"\"\"\n return AbbreviatedValue(\n self.value.format(*args, **kwargs),\n self._abbr.format(*args, **kwargs) if self._abbr else None,\n )\n\n @classmethod\n def wrap(cls, value):\n \"\"\"Wrap a value into an abbreviated value.\n\n This is a small helper class to conveniently wrap values into an abbreviated\n value, if they are not already one.\n\n Parameters\n ----------\n value\n The value to be 
wrapped\n\n Returns\n -------\n AbbreviatedValue\n The passed ``value`` if it is an instance of :class:`AbbreviatedValue`. If a\n string is passed, then the string is passed as ``value`` argument to the\n constructor.\n\n Raises\n ------\n ValueError\n If the passed value is neither a string nor an instance of\n :class:`AbbreviatedValue`.\n \"\"\"\n if isinstance(value, cls):\n return value\n if isinstance(value, str):\n return cls(value)\n\n raise ValueError(f\"Can only wrap string values. Got: {type(value)}\")" }, { "identifier": "DefinitionId", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\nclass DefinitionId:\nclass ValueDefinition:\nclass ValueDefinitionPrototype:\nclass Value:\nclass ValueSet:\nclass AVEnum(Enum):\n def __init__(self, value: str, abbr: Optional[str] = None):\n def abbr(self):\n def __str__(self):\n def __repr__(self):\n def __hash__(self):\n def __eq__(self, other):\n def __lt__(self, other):\n def format(self, *args, **kwargs):\n def wrap(cls, value):\n def __init__(self, id_: str):\n def id(self) -> str:\n def __str__(self):\n def __eq__(self, other):\n def __hash__(self):\n def from_str(cls, value: str) -> \"DefinitionId\":\n def __init__(\n self,\n id_: DefinitionIdType,\n name: str,\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n validator: Optional[Callable[[Any], None]] = None,\n precision: Optional[int] = None,\n ):\n def __repr__(self):\n def __hash__(self):\n def __eq__(self, other):\n def _get_parameters(cls) -> Set[str]:\n def _to_dict(self) -> Dict[str, Any]:\n def _getattr(name):\n def derive(self, **kwargs) -> \"ValueDefinition\":\n def __init__(self, **kwargs):\n def create_definition(self, **values: Any) -> ValueDefinition:\n def _can_format(value):\n def create_definitions(\n self, items: Iterable[Dict[str, Any]]\n ) -> List[ValueDefinition]:\n def derive(self, **kwargs) -> \"ValueDefinitionPrototype\":\n def __init__(self, definition: ValueDefinition, value: Any):\n def id(self) -> DefinitionId:\n def __repr__(self):\n def __hash__(self):\n def __eq__(self, other):\n def __init__(\n self,\n values: Optional[List[Any]] = None,\n definitions: Optional[List[ValueDefinition]] = None,\n ):\n def set(\n self,\n value: Any,\n definition: Optional[ValueDefinition] = None,\n overwrite: bool = False,\n ):\n def set_values(\n self,\n values: List[Any],\n definitions: Optional[List[ValueDefinition]] = None,\n overwrite: bool = True,\n ):\n def has_value(self, id_: Union[DefinitionIdType, ValueDefinition]) -> bool:\n def __contains__(self, item: Union[DefinitionIdType, ValueDefinition]) -> bool:\n def get(self, id_: Union[DefinitionIdType, ValueDefinition]) -> Value:\n def __getitem__(self, key: Union[DefinitionIdType, ValueDefinition]) -> Value:\n def get_raw_value(self, id_: Union[DefinitionIdType, ValueDefinition]) -> Any:\n def get_definition(\n self, id_: Union[DefinitionIdType, ValueDefinition]\n ) -> ValueDefinition:\n def values(self) -> ValuesView[Value]:\n def ids(self) -> KeysView[DefinitionId]:\n def definitions(self) -> Iterable[ValueDefinition]:\n def __len__(self) -> int:\n def __iter__(self):\n def empty(self) -> bool:\n def _assert_add_type(other):\n def items(self) -> ItemsView[DefinitionId, Value]:\n def _combine(self, other, overwrite):\n def __add__(self, other):\n def __iadd__(self, other):\n def __or__(self, other: \"ValueSet\") -> \"ValueSet\":\n def __ior__(self, other: \"ValueSet\") -> \"ValueSet\":\n def __eq__(self, other):\n def __new__(cls, *_args, 
**_kwargs): # noqa: D102\n def __init__(self, value, abbr=None):\n def __repr__(self):\n def __str__(self):\n def __int__(self):\n def __lt__(self, other):\n def abbr(self):\n def variable(self):\n def from_abbr(cls, value: str):\n def from_variable(cls, value: str):\n VALUE_CLS: ClassVar[Type[Value]] = Value" }, { "identifier": "ErrorHandling", "path": "dispel/processing/core.py", "snippet": "class ProcessingError(Exception):\nclass StopProcessingError(ProcessingError):\nclass FlagError(ProcessingError):\nclass InvalidDataError(ProcessingError):\nclass ProcessingResultBase:\nclass ProcessingResult(ProcessingResultBase):\nclass ErrorHandling(Enum):\nclass ProcessingControlResult(ProcessingResultBase):\nclass Parameter(Generic[ParameterType]):\nclass ProcessingStep:\nclass CoreProcessingStepGroup(ProcessingStep):\nclass _ChainedProcesses(CoreProcessingStepGroup):\nclass FlagReadingStep(FlagStepMixin, ProcessingStep):\n def __init__(self, message: str, step: \"ProcessingStep\"):\n def __init__(self, flag: Flag, step: \"ProcessingStep\"):\n def get_kwargs(self) -> Dict[str, Any]:\n def get_kwargs(self) -> Dict[str, Any]:\n def get_sources(self) -> Iterable[SourcesType]:\n def should_raise(self) -> bool:\n def __bool__(self) -> bool:\n def from_bool(cls, stop_processing: bool) -> \"ErrorHandling\":\n def __post_init__(self):\n def get_targets(self) -> Iterable[EntityType]:\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\n def __new__(cls, id_: str, *_args, **_kwargs):\n def __init__(\n self,\n id_: str,\n default_value: Optional[ParameterType] = None,\n validator: Optional[Callable[[Any], None]] = None,\n description: Optional[str] = None,\n ):\n def id(self):\n def value(self) -> ParameterType:\n def value(self, value: ParameterType):\n def has_parameter(cls, full_id: str) -> bool:\n def set_value(cls, full_id: str, value: Any):\n def __init__(self):\n def process(self, reading: Reading, **kwargs) -> ProcessResultType:\n def assert_valid_reading(self, reading: Reading, **kwargs):\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def set_previous(self, step: \"ProcessingStep\"):\n def set_next(self, step: \"ProcessingStep\"):\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n def __and__(self, other):\n def get_parameters(self) -> List[Tuple[str, Parameter]]:\n def __new__(cls, *args, **kwargs):\n def __init__(self, steps: Optional[List[ProcessingStep]] = None, **kwargs):\n def set_kwargs(self, **kwargs):\n def get_kwargs(self) -> Dict[str, Any]:\n def set_steps(self, steps: List[ProcessingStep]):\n def get_steps(self) -> List[ProcessingStep]:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n def __init__(\n self,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n 
def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n RAISE = \"raise\"\n IGNORE = \"ignore\"" }, { "identifier": "MutateDataSetProcessingStepBase", "path": "dispel/processing/data_set.py", "snippet": "class RawDataSetProcessingResult(LevelProcessingResult):\nclass StorageError(Enum):\nclass DataSetProcessingStepProtocol(metaclass=ABCMeta):\nclass DataSetProcessingStepMixin(\n TaskMixin,\n DataSetProcessingStepProtocol,\n LevelProcessingStepProtocol,\n metaclass=ABCMeta,\n):\nclass DataSetProcessingStep(\n DataSetProcessingStepMixin, LevelProcessingStep, metaclass=ABCMeta\n):\nclass MutateDataSetProcessingStepBase(DataSetProcessingStep, metaclass=ABCMeta):\nclass FlagDataSetStep(FlagStepMixin, DataSetProcessingStep, metaclass=ABCMeta):\n def __post_init__(self):\n def overwrite(self) -> bool:\n def concatenate(self) -> bool:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def get_data_set_ids(self) -> Iterable[str]:\n def get_raw_data_sets(self, level: Level) -> List[RawDataSet]:\n def get_data_frames(self, level: Level) -> List[pd.DataFrame]:\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n def get_data_sets_flag_targets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Iterable[EntityType]:\n def __init__(self, *args, **kwargs):\n def get_data_set_ids(self) -> Iterable[str]:\n def get_raw_data_sets(self, level: Level) -> List[RawDataSet]:\n def get_data_frames(self, level: Level) -> List[pd.DataFrame]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\ndef transformation(_func=None, **kwargs):\n def wrapper(func):\ndef decorated_processing_function(\n func: Callable[..., Any],\n data_sets: Sequence[pd.DataFrame],\n reading: Reading,\n level: Level,\n **kwargs,\n) -> Any:\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def get_transform_function(self) -> Optional[Callable[..., Any]]:\n def get_transform_functions(self) -> TransformationFunctionGeneratorType:\n def wrap_result(\n self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n def 
process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n target_ids: Optional[Union[Iterable[str], str]] = None,\n ):\n def get_target_ids(self) -> Iterable[str]:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def get_data_sets_flag_targets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n RAISE = \"raise\"\n IGNORE = \"ignore\"\n OVERWRITE = \"overwrite\"\n CONCATENATE = \"concatenate\"" }, { "identifier": "FlagStepMixin", "path": "dispel/processing/flags.py", "snippet": "class FlagStepMixin(TaskMixin, metaclass=ABCMeta):\n \"\"\"A flag mix in class.\"\"\"\n\n #: The name of the flag\n flag_name: Union[AV, str]\n\n #: The type of the flag\n flag_type: Union[FlagType, str]\n\n # The severity of the flag\n flag_severity: Union[FlagSeverity, str]\n\n #: The detailed reason of the flag\n reason: str\n\n #: The stop_processing status of the flag step\n stop_processing: bool = False\n\n #: The flagging function\n flagging_function: Optional[Callable[..., bool]] = None\n\n def __init__(self, *args, **kwargs):\n kwargs = set_attributes_from_kwargs(\n self,\n \"task_name\",\n \"flag_name\",\n \"flag_type\",\n \"flag_severity\",\n \"reason\",\n \"stop_processing\",\n \"flagging_function\",\n **kwargs,\n )\n\n self.kwargs = kwargs\n super().__init__(*args, **kwargs)\n\n def get_flag_name(self, **kwargs) -> Union[str, AV]:\n \"\"\"Get the flag name.\"\"\"\n flag_name = kwargs.get(\"flag_name\", None) or getattr(self, \"flag_name\")\n if isinstance(flag_name, (str, AV)):\n return flag_name.format(**kwargs)\n raise ValueError(\"Missing flag name.\")\n\n def get_flag_type(self, **kwargs) -> Union[str, FlagType]:\n \"\"\"Get the flag type.\"\"\"\n flag_type = kwargs.get(\"flag_type\", None) or getattr(self, \"flag_type\")\n if isinstance(flag_type, (str, FlagType)):\n return flag_type\n raise ValueError(\"Missing flag type.\")\n\n def get_flag_severity(self, **kwargs) -> Union[str, FlagSeverity]:\n \"\"\"Get the flag severity.\"\"\"\n flag_severity = kwargs.get(\"flag_severity\", None) or getattr(\n self, \"flag_severity\"\n )\n if isinstance(flag_severity, (str, FlagSeverity)):\n return flag_severity\n raise ValueError(\"Missing flag severity.\")\n\n def get_reason(self, **kwargs) -> str:\n \"\"\"Get the flag reason.\"\"\"\n reason = kwargs.get(\"reason\", None) or getattr(self, \"reason\")\n if isinstance(reason, str):\n return reason.format(**kwargs)\n raise ValueError(\"Missing flag reason.\")\n\n @abstractmethod\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> 
Iterable[EntityType]:\n \"\"\"Get flag targets.\n\n Parameters\n ----------\n reading\n The reading to which the targets are associated.\n level\n The level associated with the targets (if needed).\n kwargs\n Keyword arguments from which the flag targets are to be extracted.\n\n Returns\n -------\n Iterable[EntityType]\n An iterable of the flag targets.\n \"\"\"\n raise NotImplementedError\n\n def get_flagging_function(self) -> Optional[Callable[..., bool]]:\n \"\"\"Get the flagging function.\"\"\"\n # unbind bound methods\n func = self.flagging_function\n if func is not None and hasattr(func, \"__func__\"):\n return func.__func__ # type: ignore\n return func\n\n def get_flagging_functions(self) -> FlaggingFunctionGeneratorType:\n \"\"\"Get all flagging functions associated with this step.\"\"\"\n if func := self.get_flagging_function():\n yield func, {}\n\n members = inspect.getmembers(self, predicate=inspect.isroutine)\n for _, func in members:\n if func is not None and hasattr(func, \"__flagging_function__\"):\n yield func, func.__flag_kwargs__ # type: ignore\n\n def set_flag_kwargs(self, **kwargs):\n \"\"\"Set keyword arguments inside flagging function.\n\n Parameters\n ----------\n kwargs\n The keyword arguments to be added inside the flagging function\n keyword arguments.\n \"\"\"\n _, parent, *_ = inspect.stack()\n getattr(self, parent.function).__flag_kwargs__.update(kwargs)\n\n def get_flag(self, **kwargs) -> Flag:\n \"\"\"Get the flag corresponding to the flag step.\"\"\"\n (all_kwargs := self.kwargs.copy()).update(kwargs)\n return Flag(\n id_=FlagId(\n task_name=self.get_task_name(**all_kwargs),\n flag_name=self.get_flag_name(**all_kwargs),\n flag_type=self.get_flag_type(**all_kwargs),\n flag_severity=self.get_flag_severity(**all_kwargs),\n ),\n reason=self.get_reason(**all_kwargs),\n stop_processing=self.stop_processing,\n )" }, { "identifier": "LevelFilterType", "path": "dispel/processing/level.py", "snippet": "class LevelProcessingResultBase:\nclass LevelProcessingResult(ProcessingResult, LevelProcessingResultBase):\nclass LevelProcessingControlResult(ProcessingControlResult, LevelProcessingResultBase):\nclass LevelFilter(ABC):\nclass LevelIdFilter(LevelFilter):\nclass DefaultLevelFilter(LevelFilter):\nclass LevelProcessingStepProtocol(metaclass=ABCMeta):\nclass LevelFilterProcessingStepMixin:\nclass LevelProcessingStep(\n LevelProcessingStepProtocol, LevelFilterProcessingStepMixin, ProcessingStep\n):\nclass FlagLevelStep(FlagStepMixin, LevelProcessingStep):\nclass ProcessingStepGroup(LevelFilterProcessingStepMixin, CoreProcessingStepGroup):\n def __post_init__(self):\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\ndef _intersection(a, b):\ndef _union(a, b):\n def __call__(self, levels: Iterable[Level]) -> Set[Level]:\n def __repr__(self) -> str:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def _combined(\n self, other: \"LevelFilter\", func: Callable[[Set, Set], Set]\n ) -> \"LevelFilter\":\n def _match(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __and__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __or__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __invert__(self) -> \"LevelFilter\":\n def _inverted_filter(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def 
__init__(self, level_ids: MultipleLevelIdsType):\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(self, *args, **kwargs):\n def get_level_filter(self) -> LevelFilter:\n def set_level_filter(self, level_filter: LevelFilterType):\n def inject_level_filter_from_step(self, step: \"LevelFilterProcessingStepMixin\"):\n def _get_level_filter(inner_self) -> LevelFilter:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(\n self,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def set_steps(self, steps: List[ProcessingStep]):\n def inject_level_filter_from_step(self, step: LevelFilterProcessingStepMixin):" }, { "identifier": "TransformStepChainMixIn", "path": "dispel/processing/transform.py", "snippet": "class TransformStepChainMixIn(DataSetProcessingStepProtocol, metaclass=ABCMeta):\n \"\"\"A mixin class that allows to chain transformation steps.\n\n The basic idea is to leverage the new data set ids from the previous transform step\n as the required data set ids for the current step. This avoids having to define the\n `data_set_ids` attribute.\n \"\"\"\n\n def get_data_set_ids(self) -> Iterable[str]:\n \"\"\"Get the data set ids to be processed.\n\n This uses the new data set ids from a previous transform step if set. 
Otherwise,\n falls back to the default behavior of returning the set data set ids from the\n constructor or class variable.\n\n Returns\n -------\n Iterable[str]\n An iterable of data set ids.\n \"\"\"\n assert isinstance(\n self, ProcessingStep\n ), \"TransformStepChainMixIn must inherit from ProcessingStep\"\n # pylint: disable=no-member\n if isinstance(self.predecessor, TransformStep):\n return [self.predecessor.get_new_data_set_id()]\n # pylint: enable=no-member\n\n return super().get_data_set_ids() # type: ignore[safe-super]" }, { "identifier": "iqr", "path": "dispel/stats/core.py", "snippet": "def mad(data: np.ndarray, axis=None):\ndef variation(\n data: pd.Series, error: Literal[\"raise\", \"coerce\", \"omit\"] = \"coerce\"\n) -> float:\ndef variation_increase(\n data: pd.Series, error: Literal[\"raise\", \"coerce\", \"omit\"] = \"coerce\"\n) -> float:\ndef q_factory(percentile: float, name: str) -> Callable[[pd.Series], float]:\ndef freq_nan(data: pd.Series) -> float:\ndef iqr(data: pd.Series) -> float:\ndef npcv(data: pd.Series) -> float:" } ]
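Among the snippets above, WrappedResult propagates flags through arithmetic by merging the flags of both operands. Below is a toy re-implementation of that idea (not the dispel class itself), to make the mechanics concrete:

from dataclasses import dataclass, field
from typing import List, Union

@dataclass
class Wrapped:
    """Toy value-with-flags; dispel's WrappedResult follows the same idea."""
    value: float
    flags: List[str] = field(default_factory=list)

    def __add__(self, other: Union["Wrapped", float]) -> "Wrapped":
        if isinstance(other, Wrapped):
            # Merge flags from both operands, as in WrappedResult._binary_operator.
            return Wrapped(self.value + other.value, self.flags + other.flags)
        return Wrapped(self.value + other, list(self.flags))

a = Wrapped(1.0, ["not enough data points"])
b = Wrapped(2.0)
print((a + b).value, (a + b).flags)  # 3.0 ['not enough data points']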
import inspect import math import warnings import numpy as np import pandas as pd from typing import ( Any, Callable, Dict, Generator, Iterable, List, Optional, Sequence, Tuple, Union, cast, ) from deprecated import deprecated from dispel.data.core import EntityType, Reading from dispel.data.flags import Flag, FlagSeverity, FlagType, WrappedResult from dispel.data.levels import Level from dispel.data.measures import ( MeasureId, MeasureSet, MeasureValue, MeasureValueDefinition, ) from dispel.data.values import AbbreviatedValue as AV from dispel.data.values import ( DefinitionId, DefinitionIdType, ValueDefinition, ValueDefinitionPrototype, ) from dispel.processing.core import ( ErrorHandling, ProcessingControlResult, ProcessingResult, ProcessingStep, ProcessResultType, ) from dispel.processing.data_set import ( MutateDataSetProcessingStepBase, TransformationFunctionGeneratorType, WrapResultGeneratorType, ) from dispel.processing.flags import FlagStepMixin from dispel.processing.level import LevelFilterType, LevelProcessingResult from dispel.processing.transform import TransformStepChainMixIn from dispel.stats.core import iqr, npcv, percentile_95, variation, variation_increase
15,453
... 'data-set-id', ... [ ... {'func': np.mean, 'method': 'average'}, ... {'func': np.median, 'method': 'median'} ... ], ... ValueDefinitionPrototype( ... id_='measure-{method}', ... name='{method} measure', ... unit='s' ... ) ... ) This extraction step will result in two measure values, one for the mean and one for the median. """ transform_functions: Iterable[Dict[str, Any]] def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_functions: Optional[Iterable[Dict[str, Any]]] = None, definition: Optional[ValueDefinitionPrototype] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, level_filter=level_filter, yield_if_nan=yield_if_nan, ) if transform_functions: self.transform_functions = transform_functions def get_transform_functions(self) -> TransformationFunctionGeneratorType: """Get the transform functions applied to the data sets.""" yield from super().get_transform_functions() for function_spec in self.transform_functions: spec = function_spec.copy() yield spec.pop("func"), spec AggregationFunctionType = Union[str, Callable[[pd.Series], float]] def agg_column( column: str, method: AggregationFunctionType ) -> Callable[[pd.DataFrame], float]: """Create a function to apply an aggregation function to a column. Parameters ---------- column The column to be aggregated method A function to apply to the column Returns ------- Callable[[pandas.DataFrame], float] A function that aggregates one column of a `~pandas.DataFrame`. """ def _function(data: pd.DataFrame) -> float: return data[column].agg(method) return _function #: A list of basic aggregation methods BASIC_AGGREGATIONS: List[Tuple[str, str]] = [ ("mean", "mean"), ("std", "standard deviation"), ] #: A list of commonly used aggregation methods DEFAULT_AGGREGATIONS: List[Tuple[str, str]] = [ *BASIC_AGGREGATIONS, ("median", "median"), ("min", "minimum"), ("max", "maximum"), ] #: A list of commonly used aggregation methods plus coefficient of variation DEFAULT_AGGREGATIONS_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (variation, "coefficient of variation"), ] #: A list of commonly used aggregation methods plus 95th percentile DEFAULT_AGGREGATIONS_Q95: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (percentile_95, "95th percentile"), ] #: A list of commonly used aggregation methods plus inter-quartile range DEFAULT_AGGREGATIONS_IQR: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS, (iqr, "iqr"), ] #: An extended list of commonly used aggregation methods EXTENDED_AGGREGATIONS: List[Tuple[str, str]] = [ *DEFAULT_AGGREGATIONS, ("skew", "skewness"), ("kurtosis", "kurtosis"), ] #: A list of commonly used aggregation methods plus 95th percentile and coefficient of variation DEFAULT_AGGREGATIONS_Q95_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [ *DEFAULT_AGGREGATIONS_Q95, (variation, "coefficient of variation"), ] #: A dictionary containing all aggregation methods AGGREGATION_REGISTRY: Dict[str, Tuple[AggregationFunctionType, str]] = { **{agg: (agg, agg_label) for agg, agg_label in EXTENDED_AGGREGATIONS}, "cv": (variation, "coefficient of variation"),
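A quick usage sketch for the agg_column helper defined above; the function is restated locally so the example runs without the dispel package, and the column name and values are made up:

import pandas as pd

def agg_column(column, method):
    """Build a single-argument aggregator over one DataFrame column."""
    def _function(data: pd.DataFrame) -> float:
        return data[column].agg(method)
    return _function

df = pd.DataFrame({"reaction_time": [0.31, 0.27, 0.35]})
mean_rt = agg_column("reaction_time", "mean")
print(mean_rt(df))  # 0.31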
"""Extraction functionalities for processing module.""" from __future__ import annotations class MeasureDefinitionMixin: """A mixin class for processing steps producing measure values. Parameters ---------- definition An optional value definition. If no value definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. """ #: The specification of the measure definition definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **kwargs) -> ValueDefinition: """Get the measure definition. Parameters ---------- kwargs Optional parameters that will be passed along to the creation of measure definitions from prototypes. See :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition` Returns ------- ValueDefinition The definition of the value """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." definition = self.definition if isinstance(definition, ValueDefinitionPrototype): definition = cast(ValueDefinition, definition.create_definition(**kwargs)) return definition def get_value(self, value: Any, **kwargs) -> MeasureValue: """Get a measure value based on the definition. Parameters ---------- value The value kwargs Optional arguments passed to :meth:`get_definition`. Returns ------- MeasureValue The ``value`` wrapped with the definition from :meth:`get_definition`. """ return MeasureValue(self.get_definition(**kwargs), value) class ExtractStep( MeasureDefinitionMixin, TransformStepChainMixIn, MutateDataSetProcessingStepBase ): r"""A measure extraction processing step. This class provides a convenient way to extract a measure from one or more data sets by specifying their id, their level_ids or level filter, a transformation function and a measure value definition. Parameters ---------- data_set_ids An optional list of data set ids to be used for the transformation. See :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`. transform_function An optional function to be applied to the data sets. See :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`. definition An optional value definition or prototype. See :class:`MeasureDefinitionMixin`. level_filter An optional filter to limit the levels being processed. See :class:`~dispel.processing.level.LevelProcessingStep`. yield_if_nan If ``True``, yield null values as measure values. Otherwise, processing will not return a measure value in case of a null result for the extraction. Examples -------- Assuming we wanted to compute the maximum value of a raw data set we can create the following step >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> step = ExtractStep( ... 'data-set-id', ... lambda data: data.max(axis=0), ... ValueDefinition('maximum','Maximum value') ... ) A common approach is to define a processing step for re-use and leveraging the ``@transformation`` decorator to specify the transformation function: >>> import pandas as pd >>> from dispel.data.values import ValueDefinition >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... 
definition = ValueDefinition('maximum','Maximum value') ... ... @transformation ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) Often one wants to extract multiple measures from one data set. This can be achieved by using prototypes and optional named arguments with ``@transformation``: >>> import pandas as pd >>> from dispel.data.values import ValueDefinitionPrototype >>> from dispel.processing.extract import ExtractStep >>> from dispel.processing.data_set import transformation >>> class MyExtractStep(ExtractStep): ... data_set_ids = 'data-set-id' ... definition = ValueDefinitionPrototype( ... id_='id-{agg_abbr}', ... name='{agg} value' ... ) ... ... @transformation(agg='Maximum', agg_abbr='max') ... def _max(self, data: pd.DataFrame) -> float: ... return data.max(axis=0) ... ... @transformation(agg='Minimum', agg_abbr='min') ... def _min(self, data: pd.DataFrame) -> float: ... return data.min(axis=0) """ yield_if_nan: bool = False def __init__( self, data_set_ids: Optional[Union[str, Iterable[str]]] = None, transform_function: Optional[Callable[..., Any]] = None, definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None, level_filter: Optional[LevelFilterType] = None, yield_if_nan: Optional[bool] = None, ): super().__init__( definition=definition, data_set_ids=data_set_ids, transform_function=transform_function, level_filter=level_filter, ) self.yield_if_nan = yield_if_nan or self.yield_if_nan def wrap_result( self, res: Any, level: Level, reading: Reading, **kwargs: Any ) -> WrapResultGeneratorType: """Wrap the result from the processing function into a class. Parameters ---------- res Any result returned by the extraction step. If res is a :class:`~dispel.data.flags.WrappedResult`, the flag contained in the object will be automatically added to the :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped results will always translate into flagged :class:`~dispel.data.measures.MeasureValue`. level The current level reading The current reading kwargs Additional kwargs Yields ------ LevelProcessingResult The processing result """ try: if len(res) == 0: res = math.nan warnings.warn("Extract step returned an iterable!", UserWarning) except TypeError: pass if is_wrapped := isinstance(res, WrappedResult): measure_value = res.measure_value else: measure_value = res if not (is_nan := math.isnan(measure_value)) or (is_nan and self.yield_if_nan): value = self.get_value(measure_value, **kwargs) # If result is wrapped, add the flag to the measure value if is_wrapped: value.add_flags(res, ignore_duplicates=True) yield LevelProcessingResult( step=self, sources=self.get_raw_data_sets(level), result=value, level=level, ) @deprecated(reason="Use ExtractStep and @transformation decorator") class ExtractMultipleStep(ExtractStep): r"""A measure extraction processing step for multiple measures. This processing step allows to produce multiple :class:`~dispel.data.measures.MeasureValue`\ s by providing a list of functions and a :class:`~dispel.data.values.ValueDefinitionPrototype` to create the :class:`~dispel.data.values.ValueDefinition`\ s from. Parameters ---------- data_set_ids An optional list of data set ids to be used for the transformation. See :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`. 
    transform_functions
        An optional list of dictionaries containing at least the processing function
        under the key ``func``, which consumes the specified data sets through
        ``data_set_ids`` as positional arguments and returns a measure value passed to
        :class:`~dispel.data.measures.MeasureValue`. Additional keywords will be
        passed to
        :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition`. If no
        functions are provided, the :data:`transform_functions` class variable will be
        used.
    definition
        A :class:`~dispel.data.values.ValueDefinitionPrototype` that is used to create
        the :class:`~dispel.data.measures.MeasureValueDefinition`\s for the
        transformation functions provided in ``transform_functions``.
    level_filter
        An optional filter to limit the levels being processed. See
        :class:`~dispel.processing.level.LevelProcessingStep`.
    yield_if_nan
        If ``True``, yield null values as measure values. Otherwise, processing will
        not return a measure value in case of a null result for the extraction.

    Examples
    --------
    To ease the generation of multiple similar measures, the
    :class:`ExtractMultipleStep` provides a convenient way to do so. Assume you want
    to compute both the mean and median of a data set; this can be achieved as
    follows:

    >>> import numpy as np
    >>> from dispel.data.values import ValueDefinitionPrototype
    >>> from dispel.processing.extract import ExtractMultipleStep
    >>> step = ExtractMultipleStep(
    ...     'data-set-id',
    ...     [
    ...         {'func': np.mean, 'method': 'average'},
    ...         {'func': np.median, 'method': 'median'}
    ...     ],
    ...     ValueDefinitionPrototype(
    ...         id_='measure-{method}',
    ...         name='{method} measure',
    ...         unit='s'
    ...     )
    ... )

    This extraction step will result in two measure values, one for the mean and one
    for the median.
    """

    transform_functions: Iterable[Dict[str, Any]]

    def __init__(
        self,
        data_set_ids: Optional[Union[str, Iterable[str]]] = None,
        transform_functions: Optional[Iterable[Dict[str, Any]]] = None,
        definition: Optional[ValueDefinitionPrototype] = None,
        level_filter: Optional[LevelFilterType] = None,
        yield_if_nan: Optional[bool] = None,
    ):
        super().__init__(
            definition=definition,
            data_set_ids=data_set_ids,
            level_filter=level_filter,
            yield_if_nan=yield_if_nan,
        )

        if transform_functions:
            self.transform_functions = transform_functions

    def get_transform_functions(self) -> TransformationFunctionGeneratorType:
        """Get the transform functions applied to the data sets."""
        yield from super().get_transform_functions()

        for function_spec in self.transform_functions:
            spec = function_spec.copy()
            yield spec.pop("func"), spec


AggregationFunctionType = Union[str, Callable[[pd.Series], float]]


def agg_column(
    column: str, method: AggregationFunctionType
) -> Callable[[pd.DataFrame], float]:
    """Create a function to apply an aggregation function on a column.

    Parameters
    ----------
    column
        The column to be aggregated
    method
        A function to apply on the column

    Returns
    -------
    Callable[[pandas.DataFrame], float]
        A function that aggregates one column of a `~pandas.DataFrame`.
    """

    def _function(data: pd.DataFrame) -> float:
        return data[column].agg(method)

    return _function


#: A list of basic aggregation methods
BASIC_AGGREGATIONS: List[Tuple[str, str]] = [
    ("mean", "mean"),
    ("std", "standard deviation"),
]

#: A list of commonly used aggregation methods
DEFAULT_AGGREGATIONS: List[Tuple[str, str]] = [
    *BASIC_AGGREGATIONS,
    ("median", "median"),
    ("min", "minimum"),
    ("max", "maximum"),
]

#: A list of commonly used aggregation methods plus coefficient of variation
DEFAULT_AGGREGATIONS_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [
    *DEFAULT_AGGREGATIONS,
    (variation, "coefficient of variation"),
]

#: A list of commonly used aggregation methods plus 95th percentile
DEFAULT_AGGREGATIONS_Q95: List[Tuple[Union[Callable[[Any], float], str], str]] = [
    *DEFAULT_AGGREGATIONS,
    (percentile_95, "95th percentile"),
]

#: A list of commonly used aggregation methods plus inter-quartile range
DEFAULT_AGGREGATIONS_IQR: List[Tuple[Union[Callable[[Any], float], str], str]] = [
    *DEFAULT_AGGREGATIONS,
    (iqr, "iqr"),
]

#: An extended list of commonly used aggregation methods
EXTENDED_AGGREGATIONS: List[Tuple[str, str]] = [
    *DEFAULT_AGGREGATIONS,
    ("skew", "skewness"),
    ("kurtosis", "kurtosis"),
]

#: A list of commonly used aggregation methods plus 95th percentile and coefficient
#: of variation
DEFAULT_AGGREGATIONS_Q95_CV: List[Tuple[Union[Callable[[Any], float], str], str]] = [
    *DEFAULT_AGGREGATIONS_Q95,
    (variation, "coefficient of variation"),
]

#: A dictionary containing all aggregation methods
AGGREGATION_REGISTRY: Dict[str, Tuple[AggregationFunctionType, str]] = {
    **{agg: (agg, agg_label) for agg, agg_label in EXTENDED_AGGREGATIONS},
    "cv": (variation, "coefficient of variation"),
"cvi": (variation_increase, "coefficient of variation increase"),
17
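The agg_column helper above is a small closure factory: it binds a column name and an aggregation method, and returns a callable over a whole DataFrame. Below is a minimal, self-contained sketch; the helper is re-declared exactly as defined above so the snippet runs without the dispel package installed, and the 'reaction-time' column plus the 'aggregation' placeholder key are made-up illustrations.

import pandas as pd

def agg_column(column, method):
    # Mirror of the helper above: bind a column and an aggregation method
    def _function(data: pd.DataFrame) -> float:
        return data[column].agg(method)
    return _function

data = pd.DataFrame({"reaction-time": [0.31, 0.27, 0.35, 0.30]})  # hypothetical data set
mean_rt = agg_column("reaction-time", "mean")  # pandas string aggregations work
max_rt = agg_column("reaction-time", max)      # ... as do plain callables
print(mean_rt(data), max_rt(data))  # 0.3075 0.35

# Pairs such as those in BASIC_AGGREGATIONS translate directly into the
# {'func': ..., 'aggregation': ...} specifications consumed by ExtractMultipleStep
specs = [
    {"func": agg_column("reaction-time", agg), "aggregation": label}
    for agg, label in [("mean", "mean"), ("std", "standard deviation")]
]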
2023-11-14 10:06:46+00:00
24k
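One behavior of ExtractStep.wrap_result above is worth pinning down: an empty iterable result is coerced to NaN with a warning, and NaN results are suppressed unless yield_if_nan is set. A simplified, self-contained sketch of just that guard; the isinstance check is an addition for robustness here, and the real method further unwraps WrappedResult flags and yields LevelProcessingResult objects.

import math
import warnings

def should_yield(res, yield_if_nan=False):
    # Simplified guard mirroring ExtractStep.wrap_result (sketch only)
    try:
        if len(res) == 0:  # empty iterable results are coerced to NaN
            res = math.nan
            warnings.warn("Extract step returned an iterable!", UserWarning)
    except TypeError:  # scalars have no len(); nothing to coerce
        pass
    is_nan = isinstance(res, float) and math.isnan(res)
    return not is_nan or yield_if_nan

assert should_yield(1.23)
assert not should_yield(math.nan)
assert should_yield(math.nan, yield_if_nan=True)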
Jisencc/yolov5_dual_weighting
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine or triton # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {\n int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n core = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if ov_model.get_parameters()[0].get_layout().empty:\n ov_model.get_parameters()[0].set_layout(Layout('NCHW'))\n batch_dim = get_batch(ov_model)\n if batch_dim.is_static:\n batch_size = 
batch_dim.get_length()\n ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 
'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, 'r') as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith('tensorflow')\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.ov_compiled_model(im).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape 
== s, f\"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.BILINEAR)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "SegmentationModel", "path": "models/yolo.py", "snippet": "class SegmentationModel(DetectionModel):\n # YOLOv5 segmentation model\n def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):\n super().__init__(cfg, ch, nc, anchors)" }, { "identifier": "Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [], }\n self.stop_training = False # set True to interrupt training\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook: The callback hook name to register the action to\n name: The name of the action for later reference\n callback: The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook: The name of the hook to check, defaults to all\n \"\"\"\n return self._callbacks[hook] if hook else self._callbacks\n\n def run(self, hook, *args, thread=False, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks on main thread\n\n Args:\n hook: The name of the hook to check, defaults to all\n args: Arguments to receive from YOLOv5\n thread: (boolean) Run callbacks in daemon thread\n kwargs: Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n for logger in self._callbacks[hook]:\n if thread:\n threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()\n else:\n logger['callback'](*args, **kwargs)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 
1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef 
non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n if detections is None:\n gt_classes = labels.int()\n for gc in gt_classes:\n self.matrix[self.nc, gc] += 1 # background FN\n return\n\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # true background\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # predicted background\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')\n def plot(self, normalize=True, save_dir='', names=()):\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n nc, nn = self.nc, len(names) # number of classes, names\n sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size\n labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels\n ticklabels = (names + ['background']) if labels else 'auto'\n with warnings.catch_warnings():\n 
warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array,\n ax=ax,\n annot=nc < 30,\n annot_kws={\n 'size': 8},\n cmap='Blues',\n fmt='.2f',\n square=True,\n vmin=0.0,\n xticklabels=ticklabels,\n yticklabels=ticklabels).set_facecolor((1, 1, 1))\n ax.set_xlabel('True')\n ax.set_ylabel('Predicted')\n ax.set_title('Confusion Matrix')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close(fig)\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "box_iou", "path": "utils/metrics.py", "snippet": "def box_iou(box1, box2, eps=1e-7):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n # IoU = inter / (area1 + area2 - inter)\n return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output, max_det=300):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting\n targets = []\n for i, o in enumerate(output):\n box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n j = torch.full((conf.shape[0], 1), i)\n targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n return torch.cat(targets, 0).numpy()" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[5, 1:j],\n y[3, 1:j] * 1E2,\n '.-',\n linewidth=2,\n markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-',\n linewidth=2,\n markersize=8,\n alpha=.25,\n label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { 
"identifier": "create_dataloader", "path": "utils/segment/dataloaders.py", "snippet": "def create_dataloader(path,\n imgsz,\n batch_size,\n stride,\n single_cls=False,\n hyp=None,\n augment=False,\n cache=False,\n pad=0.0,\n rect=False,\n rank=-1,\n workers=8,\n image_weights=False,\n quad=False,\n prefix='',\n shuffle=False,\n mask_downsample_ratio=1,\n overlap_mask=False,\n seed=0):\n if rect and shuffle:\n LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabelsAndMasks(\n path,\n imgsz,\n batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix,\n downsample_ratio=mask_downsample_ratio,\n overlap=overlap_mask)\n\n batch_size = min(batch_size, len(dataset))\n nd = torch.cuda.device_count() # number of CUDA devices\n nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n generator = torch.Generator()\n generator.manual_seed(6148914691236517205 + seed + RANK)\n return loader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n worker_init_fn=seed_worker,\n generator=generator,\n ), dataset" }, { "identifier": "mask_iou", "path": "utils/segment/general.py", "snippet": "def mask_iou(mask1, mask2, eps=1e-7):\n \"\"\"\n mask1: [N, n] m1 means number of predicted objects\n mask2: [M, n] m2 means number of gt objects\n Note: n means image_w x image_h\n\n return: masks iou, [N, M]\n \"\"\"\n intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection\n return intersection / (union + eps)" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", "path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n protos: [mask_dim, mask_h, mask_w]\n masks_in: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape: 
input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / shape[0], mw / shape[1]) # gain = old / new\n pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "scale_image", "path": "utils/segment/general.py", "snippet": "def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n \"\"\"\n img1_shape: model input shape, [h, w]\n img0_shape: origin pic shape, [h, w, 3]\n masks: [h, w, num]\n \"\"\"\n # Rescale coordinates (xyxy) from im1_shape to im0_shape\n if ratio_pad is None: # calculate from im0_shape\n gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new\n pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding\n else:\n pad = ratio_pad[1]\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n if len(masks.shape) < 2:\n raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n masks = masks[top:bottom, left:right]\n # masks = masks.permute(2, 0, 1).contiguous()\n # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n # masks = masks.permute(1, 2, 0).contiguous()\n masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n if len(masks.shape) == 2:\n masks = masks[:, :, None]\n return masks" }, { "identifier": "Metrics", "path": "utils/segment/metrics.py", "snippet": "class Metrics:\n \"\"\"Metric for boxes and masks.\"\"\"\n\n def __init__(self) -> None:\n self.metric_box = Metric()\n self.metric_mask = Metric()\n\n def update(self, results):\n \"\"\"\n Args:\n results: Dict{'boxes': Dict{}, 'masks': Dict{}}\n \"\"\"\n self.metric_box.update(list(results['boxes'].values()))\n self.metric_mask.update(list(results['masks'].values()))\n\n def mean_results(self):\n return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n def class_result(self, i):\n return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n def get_maps(self, nc):\n return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n @property\n def ap_class_index(self):\n # boxes and masks have the same ap_class_index\n return self.metric_box.ap_class_index" }, { "identifier": "ap_per_class_box_and_mask", "path": "utils/segment/metrics.py", "snippet": "def ap_per_class_box_and_mask(\n tp_m,\n tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=False,\n save_dir='.',\n names=(),\n):\n \"\"\"\n Args:\n tp_b: tp of boxes.\n tp_m: tp of masks.\n other arguments see `func: ap_per_class`.\n \"\"\"\n results_boxes = ap_per_class(tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix='Box')[2:]\n results_masks = ap_per_class(tp_m,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix='Mask')[2:]\n\n results = {\n 'boxes': {\n 'p': results_boxes[0],\n 'r': results_boxes[1],\n 'ap': results_boxes[3],\n 'f1': results_boxes[2],\n 'ap_class': results_boxes[4]},\n 'masks': {\n 'p': results_masks[0],\n 'r': results_masks[1],\n 'ap': 
results_masks[3],\n 'f1': results_masks[2],\n 'ap_class': results_masks[4]}}\n return results" }, { "identifier": "plot_images_and_masks", "path": "utils/segment/plots.py", "snippet": "@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if isinstance(masks, torch.Tensor):\n masks = masks.cpu().numpy().astype(int)\n\n max_size = 1920 # max image size\n max_subplots = 16 # max image subplots, i.e. 4x4\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n idx = targets[:, 0] == i\n ti = targets[idx] # image targets\n\n boxes = xywh2xyxy(ti[:, 2:6]).T\n classes = ti[:, 1].astype('int')\n labels = ti.shape[1] == 6 # labels if no conf column\n conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale < 1: # absolute coords need scale if image scales\n boxes *= scale\n boxes[[0, 2]] += x\n boxes[[1, 3]] += y\n for j, box in enumerate(boxes.T.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n annotator.box_label(box, label, color=color)\n\n # Plot masks\n if len(masks):\n if masks.max() > 1.0: # mean that masks are overlap\n image_masks = masks[[i]] # (1, 640, 640)\n nl = len(ti)\n index = np.arange(nl).reshape(nl, 1, 1) + 1\n image_masks = np.repeat(image_masks, nl, axis=0)\n image_masks = np.where(image_masks == index, 1.0, 0.0)\n else:\n image_masks = masks[idx]\n\n im = np.asarray(annotator.im).copy()\n for j, box in enumerate(boxes.T.tolist()):\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n color = colors(classes[j])\n mh, mw = image_masks[j].shape\n if mh != h or mw != w:\n mask = image_masks[j].astype(np.uint8)\n mask = cv2.resize(mask, (w, h))\n mask = mask.astype(bool)\n else:\n mask = image_masks[j].astype(bool)\n with contextlib.suppress(Exception):\n im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6\n 
annotator.fromarray(im)\n annotator.im.save(fname) # save" }, { "identifier": "de_parallel", "path": "utils/torch_utils.py", "snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
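The mask_iou snippet in the context above reduces mask overlap to linear algebra: flatten each binary mask into a vector, and a single matrix product yields all pairwise intersections. A small numeric check, with the function body copied from the snippet:

import torch

def mask_iou(mask1, mask2, eps=1e-7):
    # mask1: [N, n], mask2: [M, n], n = image_w * image_h (flattened masks)
    intersection = torch.matmul(mask1, mask2.t()).clamp(0)
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection
    return intersection / (union + eps)

# Two 2x2 "images", flattened to length-4 vectors
pred = torch.tensor([[1., 1., 0., 0.]])  # prediction covers the top row
gt = torch.tensor([[1., 0., 0., 0.],     # ground truth 1: top-left pixel only
                   [1., 1., 1., 1.]])    # ground truth 2: the whole image
print(mask_iou(pred, gt))  # tensor([[0.5000, 0.5000]]): overlaps 1/2 and 2/4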
import argparse
import json
import os
import subprocess
import sys

import numpy as np
import torch
import torch.nn.functional as F
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
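The validation entry point is wrapped in smart_inference_mode, which per the context snippet applies torch.inference_mode() on torch>=1.9 and falls back to torch.no_grad() otherwise. A behavioral sketch; the hasattr probe below stands in for the check_version() test used in utils/torch_utils.py:

import torch

def smart_inference_mode_sketch(fn):
    # Pick inference_mode when available (torch>=1.9), else no_grad
    ctx = torch.inference_mode if hasattr(torch, 'inference_mode') else torch.no_grad
    return ctx()(fn)

@smart_inference_mode_sketch
def dummy_val_step(x):
    return x * 2  # no autograd graph is recorded inside the decorated call

out = dummy_val_step(torch.ones(3, requires_grad=True))
print(out.requires_grad)  # False: gradients stay disabled throughout validation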
17,353
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct_bboxes = process_batch(predn, labelsn, iouv)
                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if plots and batch_i < 3:
                plot_masks.append(pred_masks[:15])  # filter top 15 to plot

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                pred_masks = scale_image(im[si].shape[1:],
                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            if len(plot_masks):
                plot_masks = torch.cat(plot_masks, dim=0)
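Editor's note: the evaluation loop above boils down to an N-detections x 10-thresholds boolean "correct" matrix built by greedy IoU matching. A minimal, self-contained sketch of that thresholding step follows; the IoU values and class-agreement tensor are toy data, not real detections, and only the matching logic mirrors the `process_batch` function shown in the full file below.

# Sketch: greedy IoU-threshold matching over 10 thresholds (toy data).
import numpy as np
import torch

iouv = torch.linspace(0.5, 0.95, 10)           # same 10 thresholds as iouv in the code above
iou = torch.tensor([[0.72, 0.10],              # IoU of label 0 vs detections 0, 1
                    [0.05, 0.91]])             # IoU of label 1 vs detections 0, 1
correct_class = torch.tensor([[True, False],
                              [False, True]])  # class agreement per (label, detection)

correct = np.zeros((2, 10), dtype=bool)        # detections x IoU levels
for i, t in enumerate(iouv):
    x = torch.where((iou >= t) & correct_class)
    if x[0].shape[0]:
        matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).numpy()
        matches = matches[matches[:, 2].argsort()[::-1]]                   # best IoU first
        matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one label per detection
        matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one detection per label
        correct[matches[:, 1].astype(int), i] = True
print(correct.sum(0))  # correct detections per threshold -> [2 2 2 2 2 1 1 1 1 0]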
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_label     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
        rle['counts'] = rle['counts'].decode('utf-8')
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5),
            'segmentation': rles[i]})


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    if masks:
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
            scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1])  # native-space pred

            # Evaluate
            if nl:
                tbox = xywh2xyxy(labels[:, 1:5])  # target boxes
                scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1])  # native-space labels
                labelsn = torch.cat((labels[:, 0:1], tbox), 1)  # native-space labels
                correct_bboxes = process_batch(predn, labelsn, iouv)
                correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True)
                if plots:
                    confusion_matrix.process_batch(predn, labelsn)
            stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0]))  # (conf, pcls, tcls)

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if plots and batch_i < 3:
                plot_masks.append(pred_masks[:15])  # filter top 15 to plot

            # Save/log
            if save_txt:
                save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt')
            if save_json:
                pred_masks = scale_image(im[si].shape[1:],
                                         pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1])
                save_one_json(predn, jdict, path, class_map, pred_masks)  # append to COCO-JSON dictionary
            # callbacks.run('on_val_image_end', pred, predn, path, names, im[si])

        # Plot images
        if plots and batch_i < 3:
            if len(plot_masks):
                plot_masks = torch.cat(plot_masks, dim=0)
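Editor's note: the mask branch of `process_batch` above calls `mask_iou` on masks flattened to row vectors, so pairwise intersections reduce to a single matrix product. A toy illustration of that flatten-then-compare idea follows; the 4x4 masks are made up, and the IoU arithmetic is an assumption about `mask_iou`'s semantics inferred from the call site above, not the repo's helper itself.

# Toy mask IoU on flattened binary masks (hypothetical data).
import torch

gt = torch.zeros(1, 4, 4); gt[0, :2, :] = 1   # ground-truth mask: top half (8 px)
pr = torch.zeros(1, 4, 4); pr[0, 1:3, :] = 1  # predicted mask: middle band (8 px)

g = gt.view(1, -1)                            # (n_gt, H*W), as in process_batch above
p = pr.view(1, -1)                            # (n_pred, H*W)
inter = g @ p.T                               # pairwise intersection pixel counts (here 4)
union = g.sum(1, keepdim=True) + p.sum(1) - inter
print((inter / union).item())                 # 4 / 12 -> 0.3333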
plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)
15
2023-11-12 13:28:26+00:00
24k
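Editor's note: the row above ends with the ground-truth continuation line (`plot_images_and_masks(...)`), its gold snippet index, a timestamp, and a context-length bucket. A minimal sketch of scoring a model's completion against such a record follows; the `record` dict keys mirror the dump's schema, and `predict_next_line` is a hypothetical model call, not part of the dataset.

# Hypothetical exact-match scoring for one record of this dump.
def normalize(line: str) -> str:
    return ' '.join(line.strip().split())  # collapse whitespace before comparing

record = {
    'cropped_code': '...',  # the code prefix shown above (elided here)
    'next_line': "plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names)",
}

def exact_match(prediction: str, reference: str) -> bool:
    return normalize(prediction) == normalize(reference)

# prediction = predict_next_line(record['cropped_code'])  # hypothetical model call
prediction = record['next_line']                          # stand-in for a perfect completion
print(exact_match(prediction, record['next_line']))       # True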
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/vm/ark_dpa_vm_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_generator=event_generator, theme=ARK_INQUIRER_THEME, *args, **kwargs)\n\n def render(self, question, answers=None):\n question.answers = answers or {}\n\n if question.ignore:\n return question.default\n\n clazz = self.render_factory(question.kind)\n render = clazz(question, terminal=self.terminal, theme=self._theme, show_default=question.show_default)\n if isinstance(\n render, (inquirer.render.console._text.Text, inquirer.render.console._password.Password, inquirer.render.console._path.Path)\n ):\n render.current = ''\n self.clear_eos()\n\n try:\n a = self._event_loop(render)\n if not a and question.default:\n a = question.default\n elif not a and question.name in answers:\n a = answers[question.name]\n return a\n finally:\n print('')\n\n def _print_header(self, render):\n base = render.get_header()\n\n header = base[: self.width - 9] + '...' if len(base) > self.width - 6 else base\n default_value = '{normal} ({default})'.format(default=render.question.default, normal=self.terminal.normal)\n show_default = render.question.default and render.show_default\n header += default_value if show_default else ''\n msg_template = '{t.move_up}{t.clear_eol}{tq.brackets_color}{tq.mark_color}?{tq.brackets_color} {msg}{t.normal}'\n\n escaped_current_value = str(render.get_current_value()).replace('{', '{{').replace('}', '}}')\n self.print_str(\n f'\\n{msg_template} {escaped_current_value}',\n msg=header,\n lf=not render.title_inline,\n tq=self._theme.Question,\n )" }, { "identifier": "ArkISPAuth", "path": "ark_sdk_python/auth/ark_isp_auth.py", "snippet": "class ArkISPAuth(ArkAuth):\n def __perform_identity_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=secret.secret.get_secret_value() if secret else None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, ArkSystemConfig.is_interactive() and method_settings.identity_mfa_interactive, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform [{str(ex)}]')\n raise ArkAuthException from ex\n\n def __perform_identity_refresh_authentication(self, profile: ArkProfile, auth_profile: 
ArkAuthProfile, token: ArkToken) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n load_cache=True,\n cache_profile=profile,\n )\n identity.refresh_auth_identity(profile, method_settings.identity_mfa_interactive, False)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n raise ArkAuthException('Failed to authenticate to isp via identity') from ex\n\n def __perform_identity_service_user_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n if not secret:\n raise ArkException('Token secret is required for identity service user auth')\n method_settings = cast(IdentityServiceUserArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentityServiceUser(\n username=auth_profile.username,\n token=secret.secret.get_secret_value(),\n app_name=method_settings.identity_authorization_application,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.IdentityServiceUser,\n expires_in=datetime.now() + timedelta(hours=4),\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform with service user [{str(ex)}]')\n raise ArkAuthException from ex\n\n @overrides\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs authentication to the identity security platform identity tenant\n Authentication can be done with either a service user or a normal user\n Authentication Methods:\n - Identity, Default\n - IdentityServiceUser\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n secret (Optional[ArkSecret], optional): _description_. Defaults to None.\n force (bool, optional): _description_. 
Defaults to False.\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_authentication(profile, auth_profile, secret, force)\n if auth_profile.auth_method == ArkAuthMethod.IdentityServiceUser:\n return self.__perform_identity_service_user_authentication(profile, auth_profile, secret, force)\n raise ArkAuthException('Given auth method is not supported')\n\n @overrides\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Refresh for isp tenant is supported only for identity\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing refresh authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_refresh_authentication(profile, auth_profile, token)\n return token\n\n @staticmethod\n @overrides\n def authenticator_name() -> str:\n return AUTH_NAME\n\n @staticmethod\n @overrides\n def authenticator_human_readable_name() -> str:\n return AUTH_HUMAN_READABLE_NAME\n\n @staticmethod\n @overrides\n def supported_auth_methods() -> List[ArkAuthMethod]:\n return AUTH_METHODS\n\n @staticmethod\n @overrides\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS" }, { "identifier": "ArkDPABasePoliciesEditorService", "path": "ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py", "snippet": "class ArkDPABasePoliciesEditorService(\n ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]\n):\n def __init__(\n self,\n policy_type: PolicyType,\n add_policy_type: AddPolicyType,\n update_policy_type: UpdatePolicyType,\n isp_auth: ArkISPAuth,\n policies_family: str,\n tenant_id: str,\n policies_cache_dir: Optional[str] = None,\n profile: Optional[ArkProfile] = None,\n ) -> None:\n super().__init__(isp_auth)\n profile = profile or ArkProfileLoader.load_default_profile()\n self._policies_family = policies_family\n self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)\n if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:\n self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])\n self.__policies_cache_dir = self.__policies_cache_dir / policies_family\n self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)\n self.__policy_type = policy_type\n self.__add_policy_type = add_policy_type\n self.__update_policy_type = update_policy_type\n\n @abstractmethod\n def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:\n pass\n\n @abstractmethod\n def _list_policies(self) -> List[PolicyListItemType]:\n pass\n\n @abstractmethod\n def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n pass\n\n @abstractmethod\n def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) 
-> PolicyType:\n pass\n\n def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:\n remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))\n if remote_policy != workspace_policy:\n return (workspace_policy, remote_policy)\n return None\n\n def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:\n workspace_policies = self.__load_existing_policies_from_workspace()\n with ThreadPoolExecutor() as executor:\n remote_policies = {\n p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None\n }\n return remote_policies\n\n def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:\n p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')\n policies_files = [x for x in p if x.is_file() and x.suffix == suffix or '.json']\n policies = {}\n for f in policies_files:\n policy = self.__policy_type.parse_file(f)\n policies[policy.policy_name] = policy\n return policies\n\n def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.removed')\n\n def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.generated')\n\n def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix()\n\n def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:\n policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))\n policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')\n if policy_path.exists():\n existing_data = self.__policy_type.parse_raw(policy_path.read_text())\n if existing_data != policy_data:\n if not override:\n return policy_data\n if not policy_data.policy_id:\n policy_data.policy_id = policy.policy_id\n policy_path.write_text(policy_data.json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:\n \"\"\"\n Loads all remote policies into the local workspace.\n The user is asked whether to overwrite existing policies that were edited either locally or remotely.\n When default overwrite is enabled, existing policies are overwritten without prompts.\n\n Args:\n load_policies (ArkDPALoadPolicies): _description_\n\n Returns:\n ArkDPALoadedPolicies: _description_\n \"\"\"\n policies = self._list_policies()\n policies_to_query: Dict[str, PolicyType] = []\n with ThreadPoolExecutor() as executor:\n policies_to_query = {\n p.policy_name: p\n for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)\n if p is not None\n }\n # Build the query editor to ask the user\n policies_to_override = []\n if policies_to_query:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'override',\n message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',\n choices=[p.policy_name for p in policies_to_query.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policies_to_override = answers['override']\n for policy_name in policies_to_override:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_path.exists() and 
policy_name in policies_to_query:\n policy_path.write_text(policies_to_query[policy_name].json(indent=4))\n return ArkDPALoadedPolicies(\n loaded_path=str(self.__policies_cache_dir),\n overall_policies_count=len(policies),\n loaded_policies_count=len(policies) - len(policies_to_query),\n overriden_policies_count=len(policies_to_override),\n untouched_policies_count=len(policies_to_query) - len(policies_to_override),\n )\n\n def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:\n \"\"\"\n Edits the set of specified policies one at a time, either via the CLI or the default OS editor.\n Edited policies are only saved locally until they are committed.\n\n Args:\n edit_policies (ArkDPAEditPolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'\n )\n policy_names = edit_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to edit?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')\n for name in policy_names\n ],\n render=ArkInquirerRender(),\n answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},\n )\n for name in policy_names:\n policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])\n for path in [\n Path(self.__policies_cache_dir) / (name + '.json'),\n Path(self.__policies_cache_dir) / (name + '.json.generated'),\n ]:\n if path.exists():\n path.write_text(policy.json(indent=4))\n break\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit {self._policies_family} policies, '\n f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:\n \"\"\"\n Removes one or more policies from the local workspace.\n Until changes are committed, removing a remote policy only appends the `.deleted` indication to its name.\n After committing the changes, the policies are deleted both locally and remotely.\n New, uncommitted policies are deleted locally after the user consents.\n\n Args:\n remove_policies (ArkDPARemovePolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'\n )\n policy_names = remove_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to remove?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = 
answers['names']\n for policy_name in policy_names:\n for path in [\n Path(self.__policies_cache_dir) / (policy_name + '.json'),\n Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),\n ]:\n if path.exists():\n if path.suffix == '.json':\n path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))\n else:\n answers = inquirer.prompt(\n [\n inquirer.Confirm(\n 'remove',\n message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n if answers['remove']:\n path.unlink(missing_ok=True)\n\n def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:\n \"\"\"\n Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.\n Policies are viewed in the machine's default editor (both existing policies and newly generated policies).\n\n Args:\n view_policies (ArkDPAViewPolicies): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy_names = view_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to view?',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n if not policy_names:\n return\n try:\n if view_policies.unified:\n inquirer.prompt(\n [inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],\n answers={\n 'views': '\\n\\n\\n'.join(\n [f'# Policy [{policy_name}]\\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]\n )\n },\n render=ArkInquirerRender(),\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to view the {self._policies_family} policies, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:\n \"\"\"\n Resets local workspace policies.\n When all policies are reset, all local policies are overwritten and deleted policies are removed.\n Otherwise, the user can select which policies are reset.\n This function does not alter newly generated uncommitted policies.\n\n Args:\n reset_policy (ArkDPAResetPolicies): _description_\n \"\"\"\n if reset_policy.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]\n )\n if not answers:\n return\n if answers['reset']:\n self.load_policies(ArkDPALoadPolicies(override=True))\n else:\n policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not policies_diff and not removed_policies:\n return\n policy_names = reset_policy.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to reset?, press space to select',\n choices=[p for p in 
policies_diff.keys() + removed_policies.keys()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]\n for policy_name in policy_names:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_name in policies_diff:\n policy_path.write_text(policies_diff[policy_name][1].json(indent=4))\n elif policy_name in removed_policies:\n policy_path.write_text(removed_policies[policy_name].json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def generate_policy(self, generate_policy: GeneratePolicyType) -> None:\n \"\"\"\n Generates a new policy from a template and the user's parameters.\n The user is prompted for the parameters when they are not specified in the CLI.\n After policy's parameters are defined, the policy is generates in memory and can bee edited.\n The new policy is saved locally until it is committed.\n\n Args:\n generate_policy (GeneratePolicyType): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy = self._generate_policy(generate_policy, workspace_policies)\n policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')\n # Let the user edit the generated policy\n if not generate_policy.disable_edit:\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(\n 'policy_editor',\n f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',\n )\n ],\n render=ArkInquirerRender(),\n answers={'policy_editor': policy.json(indent=4, exclude_none=True)},\n )\n if not answers:\n return\n policy = self.__policy_type.parse_raw(answers['policy_editor'])\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit the {self._policies_family} policy, '\n f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'\n )\n policy_path.write_text(policy.json(indent=4))\n\n def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None:\n \"\"\"\n Calculates the diff between the local workspace and remote policies.\n This diff includes uncommitted removed policies. 
A unified or per policy diff can be displayed.\n\n Args:\n policies_diff (ArkDPAPoliciesDiff): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies:\n return\n if policies_diff.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in policies_diff.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in policies_diff.names}\n if not loaded_policies_diff and not removed_policies:\n return\n diffs = {\n policy_name: difflib.unified_diff(\n policy_tuple[1].json(indent=4).splitlines(True),\n policy_tuple[0].json(indent=4).splitlines(True),\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy_tuple in loaded_policies_diff.items()\n }\n diffs.update(\n {\n policy_name: difflib.unified_diff(\n policy.json(indent=4).splitlines(True),\n '',\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy in removed_policies.items()\n }\n )\n try:\n if policies_diff.unified:\n inquirer.prompt(\n [inquirer.Editor('diffs', 'Show all diffs')],\n render=ArkInquirerRender(),\n answers={'diffs': '\\n\\n\\n'.join([''.join(d) for d in diffs.values()])},\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_diff', f'Show [{policy_name}] diff') for policy_name in diffs.keys()],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_diff': ''.join(policy_diffs) for policy_name, policy_diffs in diffs.items()},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to show {self._policies_family} policies diff, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def policies_status(self, get_policies_status: ArkDPAGetPoliciesStatus) -> ArkDPAPoliciesStatus:\n \"\"\"\n Gets the status of locally altered policies.\n\n Args:\n get_policies_status (ArkDPAGetPoliciesStatus): _description_\n\n Returns:\n ArkDPAPoliciesStatus: _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if get_policies_status.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in get_policies_status.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in get_policies_status.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in get_policies_status.names}\n return ArkDPAPoliciesStatus(\n modified_policies=list(loaded_policies_diff.keys()),\n removed_policies=list(removed_policies.keys()),\n added_policies=list(generated_policies.keys()),\n )\n\n def commit_policies(self, commit_policies: ArkDPACommitPolicies) -> None:\n \"\"\"\n Commits policies.\n The function first calculates the differences between the local and remote policies to find out which policies were edited, including\n the policies selected for deletion and new, uncommitted policies. 
It also\n allows selecting whether to commit all the edited policies or only specific policies by name.\n\n After all policies are committed, the workspace is reorganized accordingly.\n\n Args:\n commit_policies (ArkDPACommitPolicies): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n if commit_policies.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to commit all edited {self._policies_family} policies?')]\n )\n if not answers or not answers['reset']:\n return\n else:\n if commit_policies.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in commit_policies.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in commit_policies.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in commit_policies.names}\n else:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to commit?, press space to select',\n choices=list(loaded_policies_diff.keys()) + list(removed_policies.keys()) + list(generated_policies.keys()),\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in answers['names']}\n removed_policies = {k: v for k, v in removed_policies.items() if k in answers['names']}\n generated_policies = {k: v for k, v in generated_policies.items() if k in answers['names']}\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n with ThreadPoolExecutor() as executor:\n added = executor.map(lambda p: self._add_policy(self.__add_policy_type(**p.dict())), generated_policies.values())\n updated = executor.map(lambda p: self._update_policy(self.__update_policy_type(**p[0].dict())), loaded_policies_diff.values())\n deleted = executor.map(\n lambda p: self._delete_policy(ArkDPADeletePolicy(policy_id=p.policy_id, policy_name=p.policy_name)),\n removed_policies.values(),\n )\n # Loop for exception checking\n added_policies = list(added)\n for _ in itertools.chain(updated, deleted):\n pass\n for policy_name in removed_policies.keys():\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n for policy_name in generated_policies.keys():\n for policy in added_policies:\n if policy.policy_name == policy_name:\n (Path(self.__policies_cache_dir) / (policy_name + '.json.generated')).rename(\n (Path(self.__policies_cache_dir) / (policy_name + '.json'))\n )\n (Path(self.__policies_cache_dir) / (policy_name + '.json')).write_text(policy.json(indent=4))" }, { "identifier": "ArkProfile", "path": "ark_sdk_python/models/ark_profile.py", "snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def 
validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles" }, { "identifier": "ArkDPAVMGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/vm/ark_dpa_vm_generate_policy.py", "snippet": "class ArkDPAVMGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['AWS', 'Azure', 'OnPrem']]] = Field(description='Providers to generate the policy for')\n protocols: Optional[Set[Literal['ssh', 'rdp']]] = Field(description='Protocols to generate the policy for')" }, { "identifier": "ArkProtocolType", "path": "ark_sdk_python/models/common/ark_protocol_type.py", "snippet": "class ArkProtocolType(str, MultiValueEnum):\n SSH = 'ssh', 'SSH'\n SCP = 'scp', 'SCP'\n SFTP = 'sftp', 'SFTP'\n RDP = 'rdp', 'RDP'\n CLI = 'cli', 'CLI'\n CONSOLE = 'console', 'Console'\n HTTPS = 'https', 'HTTPS'\n K8S = 'K8S', 'k8s'\n DB = 'Database', 'database', 'DATABASE'" }, { "identifier": "ArkWorkspaceType", "path": "ark_sdk_python/models/common/ark_workspace_type.py", "snippet": "class ArkWorkspaceType(str, MultiValueEnum):\n AWS = 'aws', 'AWS', 'Aws'\n AZURE = 'azure', 'AZURE', 'Azure'\n ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'\n DB = 'db', 'DATABASES', 'Databases'\n GCP = 'gcp', 'GCP'\n MYSQL = 'mysql', 'MySQL'\n MARIADB = 'mariadb', 'MariaDB'\n MSSQL = 'mssql', 'MSSQL'\n ORACLE = 'oracle', 'Oracle'\n POSTGRES = 'postgres', 'Postgres'\n FAULT = 'fault', 'FAULT'\n UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'" }, { "identifier": "ArkServiceConfig", "path": "ark_sdk_python/models/services/ark_service_config.py", "snippet": "class ArkServiceConfig(ArkModel):\n service_name: str = Field(description='Name of the service')\n required_authenticator_names: List[str] = Field(description='Required authenticators for the service to properly work')\n optional_authenticator_names: List[str] = Field(\n description='Optional authenticators for the service for extra capabilities', default_factory=list\n )" }, { "identifier": "ArkDPADeletePolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py", "snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAGetPolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py", "snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPARuleStatus", "path": 
"ark_sdk_python/models/services/dpa/policies/common/ark_dpa_rule_status.py", "snippet": "class ArkDPARuleStatus(str, Enum):\n Enabled = 'Enabled'\n Disabled = 'Disabled'\n Draft = 'Draft'\n Expired = 'Expired'" }, { "identifier": "ArkDPAUserData", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_user_data.py", "snippet": "class ArkDPAUserData(ArkCamelizedModel):\n roles: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Roles allowed for auth rule', default_factory=list)\n groups: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Groups allowed for auth rule', default_factory=list)\n users: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Users allowed for auth rule', default_factory=list)" }, { "identifier": "ArkDPAVMAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_add_policy.py", "snippet": "class ArkDPAVMAddPolicy(ArkDPABaseAddPolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(\n description='Workspaces / cloud providers data per type of cloud provider, '\n 'for example for AWS, how to filter ec2 instances to connect to'\n )\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(\n description='Rules describing how and who will be able to connect to the target instances filtered by the cloud providers'\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMAuthorizationRule", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py", "snippet": "class ArkDPAVMAuthorizationRule(ArkDPABaseAuthorizationRule):\n connection_information: ArkDPAVMConnectionInformation = Field(description='Rule information on how access is made')" }, { "identifier": "ArkDPAVMConnectionInformation", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py", "snippet": "class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):\n connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('connect_as')\n def validate_connect_as(cls, val):\n for k, v in val.items():\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n for k2 in v.keys():\n if ArkProtocolType(k2) not in [\n ArkProtocolType.SSH,\n ArkProtocolType.RDP,\n ArkProtocolType.SFTP,\n ArkProtocolType.SCP,\n ArkProtocolType.HTTPS,\n ]:\n raise ValueError('Invalid connection type')\n return val" }, { "identifier": "ArkDPAVMConnectionDataType", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_connection_data.py", "snippet": "class ArkDPAVMConnectionMethodData(ArkCamelizedModel):\nclass ArkDPAVMLocalEphemeralUserConnectionMethodData(ArkDPAVMConnectionMethodData):\nclass ArkDPAVMRDPLocalEphemeralUserConnectionData(ArkCamelizedModel):" }, { "identifier": "ArkDPAVMPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policy.py", 
"snippet": "class ArkDPAVMPolicy(ArkDPABasePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='Cloud providers info of the policy')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='Authorization rules of the policy')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPolicyListItem", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policy_list_item.py", "snippet": "class ArkDPAVMPolicyListItem(ArkDPABasePolicyListItem):\n platforms: Optional[List[ArkWorkspaceType]] = Field(description='Names of the platforms of the policy')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platforms')\n def validate_platforms(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMAWSProviderData", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_providers.py", "snippet": "class ArkDPAVMAWSProviderData(ArkCamelizedModel):\nclass ArkDPAVMAzureProviderData(ArkCamelizedModel):\nclass ArkDPAVMGCPProviderData(ArkCamelizedModel):\nclass ArkDPAVMFQDNRulesConjunction(str, Enum):\nclass ArkDPAVMFQDNOperator(str, Enum):\nclass ArkDPAVMFQDNRule(ArkCamelizedModel):\nclass ArkDPAVMOnPremProviderData(ArkCamelizedModel):\n AND = 'AND'\n OR = 'OR'\n EXACTLY = 'EXACTLY'\n WILDCARD = 'WILDCARD'\n PREFIX = 'PREFIX'\n SUFFIX = 'SUFFIX'\n CONTAINS = 'CONTAINS'" }, { "identifier": "ArkDPAVMUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_update_policy.py", "snippet": "class ArkDPAVMUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='New cloud providers to update')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='New access rules to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPoliciesService", "path": "ark_sdk_python/services/dpa/policies/vm/ark_dpa_vm_policies_service.py", "snippet": "class ArkDPAVMPoliciesService(ArkService):\n def __init__(self, isp_auth: ArkISPAuth) -> None:\n super().__init__(isp_auth)\n self.__isp_auth = isp_auth\n self.__client: ArkISPServiceClient = ArkISPServiceClient.from_isp_auth(self.__isp_auth, 'dpa')\n\n @property\n def isp_client(self) -> ArkISPServiceClient:\n return self.__client\n\n def __policy_id_by_name(self, policy_name: str) -> str:\n policies = self.list_policies_by(ArkDPAVMPoliciesFilter(name=policy_name))\n if not policies:\n raise 
ArkServiceException(f'Failed to find vm policy id by name [{policy_name}]')\n return policies[0].policy_id\n\n @staticmethod\n def __serialize_providers_dict(providers_data: ArkDPAVMProvidersDict) -> Dict:\n serialized_providers_data = {}\n for k in list(providers_data.keys()):\n serialized_providers_data[serialize_dpa_vm_policies_workspace_type(k)] = providers_data[k].dict(by_alias=True)\n return serialized_providers_data\n\n @staticmethod\n def __serialize_authorization_rules_dict(authorization_rules: List[Dict]) -> None:\n for rule in authorization_rules:\n for k in list(rule['connectionInformation']['connectAs'].keys()):\n for pk in list(rule['connectionInformation']['connectAs'][k].keys()):\n item = rule['connectionInformation']['connectAs'][k][pk]\n del rule['connectionInformation']['connectAs'][k][pk]\n rule['connectionInformation']['connectAs'][k][serialize_dpa_vm_policies_protocol_type(pk)] = item\n item = rule['connectionInformation']['connectAs'][k]\n del rule['connectionInformation']['connectAs'][k]\n rule['connectionInformation']['connectAs'][serialize_dpa_vm_policies_workspace_type(k)] = item\n\n def add_policy(self, add_policy: ArkDPAVMAddPolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Adds a new VM policy with the specified information.\n\n Args:\n add_policy (ArkDPVMAAddPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n self._logger.info(f'Adding new vm policy [{add_policy.policy_name}]')\n add_policy_dict = add_policy.dict(by_alias=True)\n add_policy_dict['providersData'] = self.__serialize_providers_dict(add_policy.providers_data)\n self.__serialize_authorization_rules_dict(add_policy_dict['userAccessRules'])\n resp: Response = self.__client.post(VM_POLICIES_API, json=add_policy_dict)\n if resp.status_code == HTTPStatus.CREATED:\n try:\n policy_id = resp.json()['policyId']\n return self.policy(ArkDPAGetPolicy(policy_id=policy_id))\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse add vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse add vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to add vm policy [{resp.text}] - [{resp.status_code}]')\n\n def delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n \"\"\"\n Deletes the specified (ID or name) VM policy.\n\n Args:\n delete_policy (ArkDPADeletePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n if delete_policy.policy_name and not delete_policy.policy_id:\n delete_policy.policy_id = self.__policy_id_by_name(delete_policy.policy_name)\n self._logger.info(f'Deleting vm policy [{delete_policy.policy_id}]')\n resp: Response = self.__client.delete(VM_POLICY_API.format(policy_id=delete_policy.policy_id))\n if resp.status_code != HTTPStatus.NO_CONTENT:\n raise ArkServiceException(f'Failed to delete vm policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy(self, update_policy: ArkDPAVMUpdatePolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Updates a VM policy.\n\n Args:\n update_policy (ArkDPAVMUpdatePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if update_policy.policy_name and not update_policy.policy_id:\n update_policy.policy_id = self.__policy_id_by_name(update_policy.policy_name)\n self._logger.info(f'Updating vm policy [{update_policy.policy_id}]')\n update_dict = json.loads(update_policy.json(by_alias=True, 
exclude_none=True, exclude={'new_policy_name', 'policy_name'}))\n if update_policy.new_policy_name:\n update_dict['policyName'] = update_policy.new_policy_name\n else:\n update_dict['policyName'] = update_policy.policy_name\n if update_policy.providers_data:\n update_dict['providersData'] = self.__serialize_providers_dict(update_policy.providers_data)\n if 'userAccessRules' in update_dict:\n self.__serialize_authorization_rules_dict(update_dict['userAccessRules'])\n resp: Response = self.__client.put(VM_POLICY_API.format(policy_id=update_policy.policy_id), json=update_dict)\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPAVMPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse update vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse update vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to update vm policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy_status(self, update_policy_status: ArkDPAUpdatePolicyStatus) -> ArkDPAVMPolicy:\n \"\"\"\n Updates the status of the specified (by ID) VM policy.\n\n Args:\n update_policy_status (ArkDPAUpdatePolicyStatus): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if update_policy_status.policy_name and not update_policy_status.policy_id:\n update_policy_status.policy_id = self.__policy_id_by_name(update_policy_status.policy_name)\n self._logger.info(f'Updating vm policy status [{update_policy_status.policy_id}]')\n resp: Response = self.__client.put(\n VM_UPDATE_POLICY_STATUS_API.format(policy_id=update_policy_status.policy_id),\n json=update_policy_status.dict(exclude={'policy_id'}),\n )\n if resp.status_code == HTTPStatus.OK:\n return self.policy(ArkDPAGetPolicy(policy_id=update_policy_status.policy_id))\n raise ArkServiceException(f'Failed to update vm policy status [{resp.text}] - [{resp.status_code}]')\n\n def list_policies(self) -> List[ArkDPAVMPolicyListItem]:\n \"\"\"\n Lists all of the tenants's VM policies.\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n List[ArkDPAVMPolicyListItem]: _description_\n \"\"\"\n self._logger.info('Retrieving all vm policies')\n resp: Response = self.__client.get(VM_POLICIES_API)\n if resp.status_code == HTTPStatus.OK:\n try:\n return parse_obj_as(List[ArkDPAVMPolicyListItem], resp.json()['items'])\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse list vm policies response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse list vm policies response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to list vm policies [{resp.text}] - [{resp.status_code}]')\n\n def list_policies_by(self, policies_filter: ArkDPAVMPoliciesFilter) -> List[ArkDPAVMPolicyListItem]:\n \"\"\"\n Lists VM policies that match the specified filters.\n\n Args:\n policies_filter (ArkDPAVMPoliciesFilter): _description_\n\n Returns:\n List[ArkDPAVMPolicyListItem]: _description_\n \"\"\"\n self._logger.info(f'Retrieving vm policies by filter [{policies_filter}]')\n policies = self.list_policies()\n\n # Filter by statuses\n if policies_filter.statuses:\n policies = [p for p in policies if p.status in policies_filter.statuses]\n\n # Filter by name wildcard\n if policies_filter.name:\n policies = [p for p in policies if fnmatch(p.policy_name, policies_filter.name)]\n\n # Filter by cloud providers\n if 
policies_filter.providers:\n policies = [p for p in policies if all(cp.value in p.platforms for cp in policies_filter.providers)]\n\n return policies\n\n def policy(self, get_policy: ArkDPAGetPolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Retrieves a VM policy by ID.\n\n Args:\n get_policy (ArkDPAGetPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if get_policy.policy_name and not get_policy.policy_id:\n get_policy.policy_id = self.__policy_id_by_name(get_policy.policy_name)\n self._logger.info(f'Retrieving vm policy [{get_policy.policy_id}]')\n resp: Response = self.__client.get(VM_POLICY_API.format(policy_id=get_policy.policy_id))\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPAVMPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to retrieve vm policy [{get_policy.policy_id}] [{resp.text}] - [{resp.status_code}]')\n\n def policies_stats(self) -> ArkDPAVMPoliciesStats:\n \"\"\"\n Calculates VM policy statistics.\n\n Returns:\n ArkDPAVMPoliciesStats: _description_\n \"\"\"\n self._logger.info('Calculating vm policies stats')\n policies = self.list_policies()\n policies_stats = ArkDPAVMPoliciesStats.construct()\n policies_stats.policies_count = len(policies)\n\n # Count policies per status\n status_types: Set[ArkDPARuleStatus] = {p.status for p in policies if p.status}\n policies_stats.policies_count_per_status = {st: len([p for p in policies if p.status and p.status == st]) for st in status_types}\n\n # Count policies per platforms\n policies_stats.policies_count_per_provider = {}\n for policy in policies:\n for platform in policy.platforms:\n if platform not in policies_stats.policies_count_per_provider:\n policies_stats.policies_count_per_provider[platform] = 0\n policies_stats.policies_count_per_provider[platform] += 1\n\n return policies_stats\n\n @staticmethod\n @overrides\n def service_config() -> ArkServiceConfig:\n return SERVICE_CONFIG" } ]
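The ArkDPAVMPoliciesService snippets above resolve policy names to IDs and filter list results client-side with shell-style wildcards via fnmatch. A minimal standalone sketch of that filtering pattern follows; the Policy dataclass and sample records are hypothetical stand-ins for ArkDPAVMPolicyListItem, not part of the SDK.

from dataclasses import dataclass
from fnmatch import fnmatch
from typing import List, Optional

@dataclass
class Policy:
    # Hypothetical stand-in for ArkDPAVMPolicyListItem
    policy_id: str
    policy_name: str
    status: str

def filter_policies(policies: List[Policy], name: Optional[str] = None,
                    statuses: Optional[List[str]] = None) -> List[Policy]:
    # Mirrors list_policies_by above: filter by status first, then by wildcard name
    if statuses:
        policies = [p for p in policies if p.status in statuses]
    if name:
        policies = [p for p in policies if fnmatch(p.policy_name, name)]
    return policies

policies = [Policy('1', 'prod-ssh', 'Enabled'), Policy('2', 'dev-rdp', 'Draft')]
print(filter_policies(policies, name='prod-*'))  # keeps only 'prod-ssh'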
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.common import ArkProtocolType, ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.vm import ( ArkDPAVMAddPolicy, ArkDPAVMAuthorizationRule, ArkDPAVMAWSProviderData, ArkDPAVMAzureProviderData, ArkDPAVMConnectionDataType, ArkDPAVMConnectionInformation, ArkDPAVMFQDNOperator, ArkDPAVMFQDNRule, ArkDPAVMFQDNRulesConjunction, ArkDPAVMGCPProviderData, ArkDPAVMLocalEphemeralUserConnectionMethodData, ArkDPAVMOnPremProviderData, ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMProvider, ArkDPAVMRDPLocalEphemeralUserConnectionData, ArkDPAVMUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.vm.ark_dpa_vm_policies_service import ArkDPAVMPoliciesService import inquirer
14456
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService(
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService(
ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMAddPolicy, ArkDPAVMUpdatePolicy, ArkDPAVMGeneratePolicy]
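Since next_line holds the ground-truth continuation of cropped_code, a row can be scored by comparing the first line a model generates against it. The exact-match rule below is an assumption for illustration; the dataset itself does not prescribe a metric.

def score_sample(prediction: str, next_line: str) -> bool:
    # Compare the model's first generated line to the gold next_line,
    # ignoring surrounding whitespace (the metric choice is ours, not the dataset's)
    pred_first = prediction.splitlines()[0] if prediction else ''
    return pred_first.strip() == next_line.strip()

gold = 'ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ...]'
print(score_sample(gold + '\nmore generated text', gold))  # True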
19
2023-11-13 09:24:31+00:00
24k
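The fields above complete one row (context through level). One plausible way to assemble such a row into a single completion prompt, assuming it has been parsed into a dict keyed by the schema's field names:

def build_prompt(row: dict) -> str:
    # Retrieved snippets first, then the file's imports, then the code to
    # complete; the separators are an arbitrary choice, not dataset-defined.
    snippets = '\n\n'.join(c['snippet'] for c in row['context'])
    return f"{snippets}\n\n{row['import_statement']}\n\n{row['cropped_code']}"

row = {
    'context': [{'identifier': 'Logger', 'path': 'commons/logger.py',
                 'snippet': 'class Logger: ...'}],
    'import_statement': 'import torch',
    'cropped_code': 'def main():',
}
print(build_prompt(row))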
kampta/asic
train.py
[ { "identifier": "Logger", "path": "commons/logger.py", "snippet": "class Logger(SummaryWriter):\n\n def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):\n super().__init__(results_path)\n self.results_path = results_path\n self.log_to_tb = log_to_tb\n self.log_to_wandb = log_to_wandb\n\n def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),\n scale_each=False, nrow=None, **kwargs):\n nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow\n if type(images[0]) is torch.Tensor:\n ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,\n normalize=True, value_range=range,\n scale_each=scale_each, **kwargs)\n grid = Image.fromarray(ndarr)\n grid.save(f\"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png\")\n if self.log_to_wandb:\n wandb.log({logging_name: wandb.Image(grid)}, step=itr)\n else:\n grid = concat_v(*images)\n grid.save(f\"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png\")\n if self.log_to_wandb:\n wandb.log({logging_name: [wandb.Image(im) for im in images]}, step=itr)\n\n if self.log_to_tb:\n self.add_image(f\"{prefix}/{logging_name}\", ndarr, itr,\n dataformats='HWC')\n\n def log_image_grid(self, images, logging_name, itr, imgs_to_show,\n log_mean_img=True, mean_range=None, range=(-1, 1),\n scale_each=False, num_heads=1, nrow=None, **kwargs):\n self._log_image_grid(images[:imgs_to_show], logging_name, \"grids\", itr,\n range=range, scale_each=scale_each, nrow=nrow, **kwargs)\n if log_mean_img: # Log average images:\n images = images.reshape(images.size(0) // num_heads, num_heads,\n *images.size()[1:])\n self._log_image_grid(images.mean(dim=0), f'mean_{logging_name}',\n \"means\", itr, range=mean_range,\n scale_each=True, nrow=nrow)\n\n def add_scalar(self, tag, scalar_value, global_step=None, **kwargs):\n if self.log_to_wandb:\n wandb.log({tag: scalar_value}, step=global_step)\n return super().add_scalar(tag, scalar_value, global_step, **kwargs)\n\n def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, **kwargs):\n if self.log_to_wandb:\n wandb.log(tag_scalar_dict, step=global_step)\n return super().add_scalars(main_tag, tag_scalar_dict, global_step, **kwargs)" }, { "identifier": "log_visuals", "path": "commons/logger.py", "snippet": "@torch.inference_mode()\ndef log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,\n vis_denseres=32):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n pseudo_kps = dset.pseudo_kps\n parts = dset.parts\n vis_sample = min(vis_sample, len(dset))\n res = dset.img_size\n has_gt_kp = dset.kps is not None\n has_fixed_pairs = dset.fixed_pairs is not None # SPair\n\n # Run full test dataloader (assuming small dataset)\n all_imgs = dset.imgs\n all_masks = dset.masks\n all_kps = dset.kps\n all_flows, _ = stn(all_imgs)\n\n if has_gt_kp:\n kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()\n kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)\n\n parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()\n parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)\n parts_cols[-1] = 0\n\n # Text logging\n text_kp, text_kp_col = load_text_points('CVPR')\n text_kp = text_kp.to(device).unsqueeze(0)\n text_kp_col = text_kp_col.to(device).unsqueeze(0)\n\n pairs = sample_tuples(len(dset), count=vis_sample, seed=0)\n src_idx, trg_idx = pairs[:, 0], pairs[:, 1]\n\n # Log only once during the training\n if train_idx == 0:\n # Log images and the mask\n writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,\n 
vis_sample, nrow=vis_sample)\n writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],\n 'img_mask', train_idx, vis_sample, nrow=vis_sample)\n\n # Log neural best buddies (sparse)\n kp1 = pseudo_kps[src_idx, trg_idx]\n kp2 = pseudo_kps[trg_idx, src_idx]\n kp_vis = kp1[..., -1] * kp2[..., -1]\n kp1, kp2 = kp1[..., :2], kp2[..., :2]\n colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n\n writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,\n log_mean_img=False, nrow=2)\n\n # Log parts\n parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)\n writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,\n nrow=vis_sample, log_mean_img=False)\n\n # Log groundtruth kp\n if has_gt_kp:\n kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]\n kp_vis = kp1[..., -1] * kp2[..., -1]\n kp1, kp2 = kp1[..., :2], kp2[..., :2]\n\n colors = kps_cols.expand(vis_sample, -1, -1)\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,\n log_mean_img=False, nrow=2)\n\n # Log kp and top predictions by STN (if kp are available)\n if has_gt_kp:\n kp1 = all_kps[src_idx][..., :2]\n kp_vis = all_kps[src_idx][..., 2]\n\n kp_pred = stn.transfer_points(\n kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)\n colors = kps_cols.expand(vis_sample, -1, -1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,\n 2*vis_sample, log_mean_img=False, nrow=2)\n\n # Log current canon image\n canon_grid = canon.get_grid(vis_sample)\n if canon_grid.size(1) > 3:\n canon_grid = canon_grid[:, :3]\n scale_factor = res / canon_grid.size(-1)\n canon_grid = F.interpolate(\n canon_grid, scale_factor=scale_factor, mode='bilinear')\n writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)\n\n # Log dense correspondences\n kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],\n resolution=vis_denseres)\n kp_pred, kp_canon = stn.transfer_points(\n kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,\n return_canon=True, is_flow=True)\n colors = map_minmax(kp_col_dense, 0, 1, -1, 1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp, sigma=4., opacity=0.75,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n blend_trg = splat_points(\n all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n blend_canon = splat_points(\n torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,\n colors=colors, 
alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\\\n flatten(0, 1)\n writer.log_image_grid(\n stacked, 'kp_pred_dense', train_idx, 3*vis_sample,\n log_mean_img=False, nrow=3)\n\n # # Log dense correspondences with text\n # text_kp = text_kp.expand(vis_sample, -1, -1)\n # text_kp_col = text_kp_col.expand(vis_sample, -1, -1)\n # kp_pred, kp_canon = stn.transfer_points(\n # text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,\n # return_canon=True, is_flow=True)\n\n # blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,\n # colors=text_kp_col)\n\n # blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,\n # opacity=1., colors=text_kp_col)\n\n # blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,\n # sigma=0.7, opacity=1., colors=text_kp_col)\n\n # stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\\\n # flatten(0, 1)\n # writer.log_image_grid(\n # stacked, 'kp_pred_text', train_idx, 3*vis_sample,\n # log_mean_img=False, nrow=3)\n\n # Log dense mapping from canonical space to Image space\n wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)\n colors = wheel.expand(vis_sample, -1, -1, -1)\n flow, _ = stn(all_imgs[src_idx])\n colors = F.grid_sample(colors, flow, padding_mode='border',\n align_corners=True)\n colors = map_minmax(colors, 0, 1, -1, 1)\n alpha = 0.5\n blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \\\n (all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]\n blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])\n writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),\n log_mean_img=False, nrow=len(blend_img)//2)\n\n # Log keypoints from Image space to canonical space\n if has_gt_kp:\n canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)\n canon_corrs = stn.unnormalize(canon_corrs, res, res)\n canon_vis = all_kps[..., -1]\n num_kp = canon_vis.size(-1)\n N = canon_vis.size(0)\n colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)\n heatmaps = splat_points(\n torch.ones(num_kp, 3, res, res, device=device) * -1,\n canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,\n colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))\n writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,\n num_kp, padding=2, pad_value=1.)\n\n # Log parts from Image space to canonical space\n # Splat one part at a time to canonical\n # TODO: splat all at once\n num_parts = dset.num_parts\n part_kp_canons = []\n part_kp_vis = [] \n for part in range(num_parts):\n part_masks = (parts == part).float().unsqueeze(1)\n kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)\n kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)\n kp_canon = stn.unnormalize(kp_canon, res, res)\n part_kp_canons.append(kp_canon.reshape(-1, 2))\n part_kp_vis.append(kp_vis.reshape(-1))\n\n part_kp_canons = torch.stack(part_kp_canons)\n part_kp_vis = torch.stack(part_kp_vis)\n colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)\n heatmaps = splat_points(\n torch.ones(num_parts, 3, res, res, device=device) * -1,\n part_kp_canons, sigma=2., opacity=1.,\n colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))\n writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,\n num_parts, padding=2, pad_value=1.)\n\n # Compute PCKs\n N = all_imgs.size(0)\n transfer_fn = stn.transfer_points\n pck_pairs = 
None\n if has_gt_kp:\n # First compute PCK for all 2-pairs\n if has_fixed_pairs:\n tuples = dset.fixed_pairs\n if dset.thresholds is not None:\n thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]\n else:\n thresholds = None\n else:\n tuples = sample_tuples(N)\n thresholds = None\n print(f\"First computing 2-point PCK for {len(tuples)} pairs\")\n gt_corrs, pred_corrs, vis = pck_loop(\n tuples, all_kps, transfer_fn, all_flows, all_masks, res,\n return_canon=False, is_flow=True)\n pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,\n img_size=res)\n\n # Compute k-cycle PCK\n pck_cycles = []\n if not has_gt_kp:\n kp, kp_vis, kp_col_dense = load_fg_points(all_masks,\n resolution=vis_denseres)\n ignore_idx = kp_vis.sum(dim=0) == 0\n all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)\n ignore_interim = True\n else:\n ignore_interim = False\n\n for k in [2, 3, 4]:\n tuples = sample_tuples(N, k=k, count=200)\n if has_fixed_pairs and dset.thresholds is not None:\n thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])\n thresholds = thresholds.reshape(-1)\n else:\n thresholds = None\n print(f\"Next computing {k}-cycle PCK for {len(tuples)} tuples\")\n gt_corrs, pred_corrs, vis = pck_loop(\n tuples, all_kps, transfer_fn, all_flows, all_masks, res,\n return_canon=False, is_flow=True, ignore_interim=ignore_interim)\n pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)\n pck_cycles.append(pck)\n\n return pck_pairs, pck_cycles" }, { "identifier": "get_rank", "path": "commons/distributed.py", "snippet": "def get_rank():\n if not dist.is_available():\n return 0\n\n if not dist.is_initialized():\n return 0\n\n return dist.get_rank()" }, { "identifier": "setup_distributed", "path": "commons/distributed.py", "snippet": "def setup_distributed():\n local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0\n n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n is_distributed = n_gpu > 1\n if is_distributed:\n torch.cuda.set_device(local_rank)\n dist.init_process_group(backend=\"nccl\", init_method=\"env://\")\n synchronize()\n return is_distributed" }, { "identifier": "reduce_loss_dict", "path": "commons/distributed.py", "snippet": "def reduce_loss_dict(loss_dict):\n world_size = get_world_size()\n\n if world_size < 2:\n return loss_dict\n\n with torch.no_grad():\n keys = []\n losses = []\n\n for k in sorted(loss_dict.keys()):\n keys.append(k)\n losses.append(loss_dict[k])\n\n losses = torch.stack(losses, 0)\n dist.reduce(losses, dst=0)\n\n if dist.get_rank() == 0:\n losses /= world_size\n\n reduced_losses = {k: v for k, v in zip(keys, losses)}\n\n return reduced_losses" }, { "identifier": "get_world_size", "path": "commons/distributed.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n\n if not dist.is_initialized():\n return 1\n\n return dist.get_world_size()" }, { "identifier": "primary", "path": "commons/distributed.py", "snippet": "def primary():\n if not dist.is_available():\n return True\n\n if not dist.is_initialized():\n return True\n\n return get_rank() == 0" }, { "identifier": "sample_tuples", "path": "commons/utils.py", "snippet": "def sample_tuples(N, k=1, count=None, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n if count is None: # return all possible (k+1) permutations\n # (N!/(N-k)!) 
x k array\n samples = np.array(list(permutations(range(N), k+1)))\n\n elif k == 1:\n p1 = np.random.choice(N, count)\n p2 = np.random.choice(N, count)\n return np.stack([p1, p2], axis=1)\n\n elif count == -1:\n samples = np.array(list(permutations(range(N), k)))\n samples = np.concatenate([samples, samples[:, 0].reshape(-1, 1)], axis=1)\n\n else: # sample count number of permutations\n # count x k array\n samples = np.zeros((count, k+1), dtype=int)\n for i in range(count):\n samples[i, :k] = np.random.choice(N, k, replace=False)\n # Force the last column to be same as the first column\n samples[:, k] = samples[:, 0]\n\n return samples" }, { "identifier": "CUBDataset", "path": "datasets/cub.py", "snippet": "class CUBDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, cls_idx=1,\n flow_dir=None, num_parts=0,\n mask_threshold=1, use_coseg_masks=False, padding_mode='border'):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cls_idx = cls_idx\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n self.fixed_pairs = None\n self.thresholds = None\n self.border = True if padding_mode=='border' else False\n\n os.makedirs(data_dir, exist_ok=True)\n download_cub(data_dir)\n download_cub_metadata(data_dir)\n\n self.files, self.bboxes, self.kps, self.masks = load_acsm_data(\n data_dir, size=img_size, split=split, cls_idx=cls_idx)\n\n imgs = []\n for i in range(len(self.files)):\n img = Image.open(self.files[i]).convert('RGB')\n img = cub_crop(img, self.img_size, self.bboxes[i], border=self.border)\n imgs.append(torch.from_numpy(np.array(img)).permute(2, 0, 1))\n self.imgs = torch.stack(imgs) / 127.5 - 1.0 # normalize (-1, 1)\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) > mask_threshold).float()\n\n self.parts = None\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n self.pseudo_kps = None\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "InMemoryDataset", "path": "datasets/in_memory.py", "snippet": "class InMemoryDataset(Dataset):\n def __init__(self, data_dir, img_size=256, flow_dir=None,\n num_parts=0, mask_threshold=1, use_coseg_masks=False,\n every_k=1):\n\n self.img_size = img_size\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n transforms.Resize(img_size),\n transforms.CenterCrop(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n files = []\n imgs = []\n for base_dir, dirnames, filenames in os.walk(data_dir):\n if len(dirnames) > 0:\n continue\n for f in sorted(filenames):\n if not f.lower().endswith(('.png', '.jpg', '.jpeg')):\n continue\n filename = Path(base_dir) / f\n files.append(filename)\n img = Image.open(filename).convert('RGB')\n imgs.append(transform(img))\n \n self.files = files[::every_k]\n self.imgs = torch.stack(imgs[::every_k])\n\n self.kps = None\n self.fixed_pairs = None\n self.thresholds = None\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{self.files[i].stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{self.files[i].stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "SpairDataset", "path": "datasets/spair.py", "snippet": "class SpairDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, spair_cat='cat',\n flow_dir=None, padding_mode='edge', num_parts=0,\n mask_threshold=1, use_coseg_masks=False):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cat = spair_cat\n self.padding_mode = padding_mode\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n SquarePad(padding_mode),\n transforms.Resize(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n os.makedirs(data_dir, exist_ok=True)\n spair_dir = download_spair(data_dir)\n\n self.files, self.kps, fixed_pairs, thresholds = load_spair_data(\n spair_dir, size=img_size, split=split, category=spair_cat)\n imgs = [transform(Image.open(self.files[i]).convert('RGB'))\n for i in range(len(self))]\n self.imgs = torch.stack(imgs)\n self.fixed_pairs = np.array(fixed_pairs)\n self.thresholds = np.array(thresholds)\n\n self.masks = torch.ones(len(self), 1, img_size, img_size)\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n \n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "Augmentor", "path": "datasets/utils.py", "snippet": "class Augmentor(nn.Module):\n def __init__(self, jitter=[0.4, 0.4, 0.2, 0.1], jitter_prob=0.8,\n gray_prob=0.2, solar_prob=0.2, tps_scale=0.4):\n super().__init__()\n self.color_transform = K.AugmentationSequential(\n # https://github.com/facebookresearch/dino/blob/main/main_dino.py#L424\n K.ColorJitter(brightness=jitter[0], contrast=jitter[1],\n saturation=jitter[2], hue=jitter[3], p=jitter_prob),\n K.RandomGrayscale(p=gray_prob),\n K.RandomGaussianBlur((3, 3), (0.1, 2.0), p=0.1),\n K.RandomSolarize(0.1, 0.1, p=solar_prob),\n )\n\n self.perspective_transform = K.RandomPerspective(0.5, p=1.)\n self.affine_transform = K.RandomAffine(30, scale=(0.7, 1.1),\n padding_mode='border', p=1.0)\n self.elastic_transform = K.RandomElasticTransform(\n p=1.0, sigma=(16., 16.), alpha=(3, 3), padding_mode='border')\n\n # TPS doesn't support transforming points\n # Using it only for dense equivariance loss\n self.tps_transform = K.RandomThinPlateSpline(scale=tps_scale, p=1.)\n\n def forward(self, x):\n pass\n\n @torch.no_grad()\n def forward_color(self, img):\n return self.color_transform(img)\n\n @torch.no_grad()\n def forward_tps(self, img, fixed=False):\n if fixed:\n img_t = self.tps_transform(img, params=self.tps_transform._params)\n else:\n img_t = self.tps_transform(img)\n return img_t\n \n @torch.no_grad()\n def forward_geom(self, img, fixed=False):\n if fixed:\n img_t = self.elastic_transform(\n self.affine_transform(img, params=self.affine_transform._params),\n params=self.elastic_transform._params)\n else:\n img_t = self.elastic_transform(self.affine_transform(img))\n return img_t\n\n\n @torch.no_grad()\n def forward_perspective(self, img, fixed=False):\n if fixed:\n img_t = self.perspective_transform(img, params=self.perspective_transform._params)\n else:\n img_t = self.perspective_transform(img)\n return img_t\n\n @torch.no_grad()\n def forward_perspective_kp(self, kp):\n return kornia.geometry.transform_points(\n self.perspective_transform.transform_matrix, kp)" }, { "identifier": "accumulate", "path": "models/utils.py", "snippet": "def accumulate(model1, model2, decay=0.999):\n par1 = dict(model1.named_parameters())\n par2 = dict(model2.named_parameters())\n\n for k in par1.keys():\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)" }, { "identifier": "requires_grad", "path": "models/utils.py", "snippet": "def requires_grad(model, flag=True):\n for p in model.parameters():\n p.requires_grad = flag" }, { "identifier": "Canonical", "path": "models/canonical.py", "snippet": "class Canonical(nn.Module):\n def __init__(self, size, std=0.1, clamp=True):\n super().__init__()\n mean = torch.zeros(size)\n std = torch.ones(size) * std\n self.grid = nn.Parameter(torch.normal(mean=mean, std=std),\n requires_grad=True)\n norm_class = Normalize()\n norm_class.apply(self.grid)\n if clamp:\n clamp_class = Clamp()\n clamp_class.apply(self.grid)\n\n def get_grid(self, N):\n return self.grid.expand(N, -1, -1, -1)\n\n def unwarp(self, flow, sample_res=256):\n N = flow.size(0)\n if sample_res is not None and sample_res != flow.size(1):\n scale_factor = sample_res / flow.size(1)\n sample_flow = F.interpolate(\n flow.permute(0, 3, 1, 2), scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n else:\n sample_flow = flow\n warped_img = F.grid_sample(\n self.get_grid(N), sample_flow,\n padding_mode='border', align_corners=True)\n return 
warped_img\n\n def forward(self, x):\n return x" }, { "identifier": "CanonicalMLP", "path": "models/canonical.py", "snippet": "class CanonicalMLP(nn.Module):\n def __init__(self, input_dim=2, output_dim=3, hidden_dim=256,\n use_positional=True, positional_dim=10,\n skip_layers=[4, 7], num_layers=8, resolution=256,\n use_tanh=True, apply_softmax=False):\n super().__init__()\n self.use_tanh = use_tanh\n self.resolution = resolution\n self.apply_softmax = apply_softmax\n self.output_dim = output_dim\n if apply_softmax:\n self.softmax= nn.Softmax()\n if use_positional:\n encoding_dimensions = 2 * input_dim * positional_dim\n self.b = nn.Parameter(\n torch.tensor([(2 ** j) * np.pi\n for j in range(positional_dim)], requires_grad = False))\n else:\n encoding_dimensions = input_dim\n\n self.hidden = nn.ModuleList()\n for i in range(num_layers):\n if i == 0:\n input_dims = encoding_dimensions\n elif i in skip_layers:\n input_dims = hidden_dim + encoding_dimensions\n else:\n input_dims = hidden_dim\n\n if i == num_layers - 1:\n # last layer\n self.hidden.append(nn.Linear(input_dims, output_dim, bias=True))\n else:\n self.hidden.append(nn.Linear(input_dims, hidden_dim, bias=True))\n\n self.skip_layers = skip_layers\n self.num_layers = num_layers\n\n self.positional_dim = positional_dim\n self.use_positional = use_positional\n\n def get_grid(self, N, device='cuda'):\n resolution = self.resolution\n indsy = torch.linspace(0, resolution-1, resolution, device=device)\n indsx = torch.linspace(0, resolution-1, resolution, device=device)\n\n # Keep (x, y) indexing to make it consistent with the flow\n points = torch.stack(\n torch.meshgrid(indsx, indsy, indexing='xy'), dim=-1).reshape(-1, 2)\n\n with torch.no_grad():\n grid = self(points)\n\n grid = grid.reshape(1, resolution, resolution, self.output_dim)\n grid = grid.permute(0, 3, 1, 2)\n return grid.expand(N, -1, -1, -1)\n\n def unwarp(self, flow, sample_res=256):\n N = flow.size(0)\n # Output of flow model is usually normalized between -1 and 1\n # So we need to first scale it up to self.resolution\n flow = map_minmax(flow, -1, 1, 0, self.resolution-1)\n\n # Resize flow if computed at a lower resolution\n if sample_res is not None and sample_res != flow.size(1):\n scale_factor = sample_res / flow.size(1)\n sample_flow = F.interpolate(\n flow.permute(0, 3, 1, 2), scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n else:\n sample_flow = flow\n\n # Unwarp\n warped_img = self(sample_flow.reshape(-1, 2))\n warped_img = warped_img.reshape(N, sample_res, sample_res, -1)\n warped_img = warped_img.permute(0, 3, 1, 2)\n return warped_img\n\n def forward(self, x):\n if self.use_positional:\n if self.b.device != x.device:\n self.b = self.b.to(x.device)\n pos = positionalEncoding_vec(x, self.b)\n x = pos\n\n input = x.detach().clone()\n for i, layer in enumerate(self.hidden):\n if i > 0:\n x = F.relu(x)\n if i in self.skip_layers:\n x = torch.cat((x, input), 1)\n x = layer(x)\n\n if self.use_tanh:\n x = torch.tanh(x)\n\n if self.apply_softmax:\n x = self.softmax(x)\n return x" }, { "identifier": "Asic", "path": "models/asic.py", "snippet": "class Asic(nn.Module):\n def __init__(self, in_ch, in_size, mf=1., bilinear=False,\n padding_mode='zeros', use_tanh=False):\n super().__init__()\n self.model = UNet(in_ch, 2, mf=mf, bilinear=bilinear)\n self.size = in_size\n self.register_buffer('identity_flow', self.get_identity_flow())\n self.padding_mode = padding_mode\n self.use_tanh = use_tanh\n\n def get_identity_flow(self):\n return F.affine_grid(\n 
torch.eye(2, 3).unsqueeze(0), (1, 1, self.size, self.size),\n align_corners=True).permute(0, 3, 1, 2).contiguous()\n\n def forward(self, x):\n if self.use_tanh:\n flow = torch.tanh(self.model(x))\n delta_flow = flow - self.identity_flow\n else:\n delta_flow = self.model(x) # (N, 2, H, W)\n flow = self.identity_flow + delta_flow\n\n flow = flow.permute(0, 2, 3, 1)\n delta_flow = delta_flow.permute(0, 2, 3, 1)\n return flow, delta_flow\n\n @torch.no_grad()\n def transfer_points(self, src_kps, src_idx, trg_idx, img, mask=None,\n res=None, return_canon=False, is_flow=False):\n # src_kps are N x P x 2 (in xy format)\n\n # Compute flow from images\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n\n # Step 1: Map the points in src to the canonical space\n max_batch_size = 2\n if src_kps.size(0) > max_batch_size:\n N = len(src_kps)\n points_canon = []\n for start_idx in range(0, N, max_batch_size):\n end_idx = min(start_idx+max_batch_size, N)\n\n points_canon_batch = self.transfer_forward(\n flow[src_idx[start_idx:end_idx]],\n src_kps[start_idx:end_idx], res=res, is_flow=True)\n points_canon.append(points_canon_batch)\n points_canon = torch.cat(points_canon, dim=0)\n else:\n points_canon = self.transfer_forward(flow[src_idx], src_kps,\n res=res, is_flow=True)\n # points_canon = torch.clamp(points_canon, min=-1, max=1)\n\n # Step 2: Map the points in the canonical space to trg\n # This is a memory intensive step, so do a single image at a time\n # if the number of points are large\n if src_kps.size(1) > 256 or src_kps.size(0) > max_batch_size:\n N = len(src_kps)\n points_transfered = []\n for start_idx in range(0, N, max_batch_size):\n end_idx = min(start_idx+max_batch_size, N)\n points_transfered_single = self.transfer_reverse(\n flow[[trg_idx[start_idx:end_idx]]],\n points_canon[start_idx:end_idx], res=res,\n mask=mask[trg_idx[start_idx:end_idx]], is_flow=True)\n points_transfered.append(points_transfered_single)\n points_transfered = torch.cat(points_transfered, dim=0)\n else:\n points_transfered = self.transfer_reverse(\n flow[trg_idx], points_canon, res=res, mask=mask[trg_idx],\n is_flow=True)\n\n if return_canon:\n points_canon = self.unnormalize(points_canon, res, res)\n return points_transfered, points_canon\n else:\n return points_transfered\n\n def transfer_forward(self, img, points, res=None, is_flow=False):\n\n # TODO: currently points generated by load_fg_points are not\n # scaled properly. 
Take a look\n # TODO: Also double check normalize and unnormalize logic\n # points are N x P x 2 (in xy format)\n # assume that the flow is also xy format\n points = self.normalize(points, res, res)\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n flow_grid = flow.permute(0, 3, 1, 2)\n points_transfered = F.grid_sample(\n flow_grid, points.unsqueeze(2).float(),\n padding_mode='border', align_corners=True)\n points_transfered = points_transfered.squeeze(3).permute(0, 2, 1)\n\n return points_transfered\n\n def transfer_reverse(self, img, points, res=None, mask=None, is_flow=False):\n N = points.size(0)\n num_points = points.size(1)\n # points are N x P x 2 (in xy format)\n points = points\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n if flow.size(1) != res:\n scale_factor = res/flow.size(1)\n flow = F.interpolate(\n flow.permute(0, 3, 1, 2),\n scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n # From (N, H, W, 2) to (N, H, W, 1, 1, 2)\n flow_reshaped = flow.unsqueeze(-2).unsqueeze(-2)\n\n # From (N, num_points, 2) to (N, 1, 1, num_points, 2, 1)\n points = points.unsqueeze(1).unsqueeze(1).unsqueeze(-1)\n\n # (N, H, W, num_points)\n similarities = (flow_reshaped @ points)[..., 0, 0]\n distances = points.pow(2).squeeze(-1).sum(dim=-1) + \\\n flow_reshaped.pow(2).sum(dim=-1).squeeze(-1) - 2 * similarities\n\n if mask is not None:\n distances[mask.squeeze(1)<0.1] = float('inf')\n\n nearest_neighbors = distances.reshape(\n N, flow_reshaped.size(1) * flow_reshaped.size(2),\n num_points).argmin(dim=1)\n points_transfered = unravel_index(\n nearest_neighbors, (flow_reshaped.size(1), flow_reshaped.size(2)))\n return points_transfered\n\n @staticmethod\n def normalize(points, res, out_res):\n return points.div(out_res - 1).add(-0.5).mul(2).mul((res - 1) / res)\n\n @staticmethod\n def unnormalize(points, res, out_res):\n return points.div((res - 1) / res).div(2).add(0.5).mul(out_res - 1)" }, { "identifier": "total_variation_loss", "path": "losses/reg_losses.py", "snippet": "def total_variation_loss(delta_flow, reduce_batch=True):\n # flow should be size (N, H, W, 2)\n reduce_dims = (0, 1, 2, 3) if reduce_batch else (1, 2, 3)\n distance_fn = lambda a: torch.where(a <= 1.0, 0.5 * a.pow(2), a - 0.5).mean(dim=reduce_dims)\n # assert delta_flow.size(-1) == 2\n diff_y = distance_fn((delta_flow[:, :-1, :, :] - delta_flow[:, 1:, :, :]).abs())\n diff_x = distance_fn((delta_flow[:, :, :-1, :] - delta_flow[:, :, 1:, :]).abs())\n loss = diff_x + diff_y\n return loss" }, { "identifier": "get_perceptual_loss", "path": "thirdparty/lpips/lpips.py", "snippet": "def get_perceptual_loss(loss_fn, device):\n if loss_fn == 'vgg_ssl':\n download_model('simclr_vgg_phase150') # Download the weights\n loss_fn_vgg = LPIPS(net='vgg', lpips=False, pnet_rand=True, pretrained_weights='pretrained/simclr_vgg_phase150.pt').to(device)\n loss_fn = lambda x,y: loss_fn_vgg(x, y) / 18.0\n elif loss_fn == 'lpips':\n download_lpips() # Download LPIPS weights\n loss_fn = LPIPS(net='vgg').to(device)\n else:\n raise NotImplementedError\n return loss_fn" }, { "identifier": "LossCorrsSparse", "path": "losses/matching_losses.py", "snippet": "class LossCorrsSparse(nn.Module):\n def __init__(self, extractor=None, flow_size=256, T=1.0):\n super().__init__()\n self.extractor = extractor\n self.flow_size = flow_size\n self.T = T\n self.dist_fn = nn.PairwiseDistance(p=2)\n self.loss_fn = nn.CrossEntropyLoss(reduction='none')\n\n def forward(self, src_flow, trg_flow, src_kp, trg_kp, kp_vis, kp_wt):\n N = 
src_flow.size(0)\n res = src_flow.size(1)\n top_k = kp_vis.shape[1]\n # bb1_canon - N x 2 x top_k x 1\n # bb2_canon - N x 2 x 1 x top_k\n # Sample flow values using the pseudo GT from the flow_grid\n src_kp_canon = F.grid_sample(\n src_flow.permute(0, 3, 1, 2),\n map_minmax(src_kp.unsqueeze(2), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n trg_kp_canon = F.grid_sample(\n trg_flow.permute(0, 3, 1, 2),\n map_minmax(trg_kp.unsqueeze(1), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n\n # dists - N x top_k x top_k\n dists1 = self.dist_fn(src_kp_canon, trg_kp_canon.detach()) * (-1/self.T)\n dists2 = self.dist_fn(src_kp_canon.detach(), trg_kp_canon) * (-1/self.T)\n labels = torch.arange(top_k, dtype=torch.long, device='cuda')\n labels = labels.unsqueeze(0).repeat(N, 1)\n labels[~kp_vis] = -100\n \n loss = self.loss_fn(dists1, labels) + self.loss_fn(dists2, labels)\n loss *= kp_wt\n return loss.sum() / kp_vis.sum()\n\n def forward_eq(self, src_flow, trg_flow, src_kp, trg_kp, kp_vis):\n N = src_flow.size(0)\n res = src_flow.size(1)\n top_k = kp_vis.shape[1]\n # bb1_canon - N x 2 x top_k x 1\n # bb2_canon - N x 2 x 1 x top_k\n # Sample flow values using the pseudo GT from the flow_grid\n src_kp_canon = F.grid_sample(\n src_flow.permute(0, 3, 1, 2),\n map_minmax(src_kp.unsqueeze(2), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n trg_kp_canon = F.grid_sample(\n trg_flow.permute(0, 3, 1, 2),\n map_minmax(trg_kp.unsqueeze(1), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n\n # dists - N x top_k x top_k\n dists1 = self.dist_fn(src_kp_canon, trg_kp_canon.detach()) * (-1/self.T)\n dists2 = self.dist_fn(src_kp_canon.detach(), trg_kp_canon) * (-1/self.T)\n labels = torch.arange(top_k, dtype=torch.long, device='cuda')\n labels = labels.unsqueeze(0).repeat(N, 1)\n labels[~kp_vis] = -100\n return self.loss_fn(dists1, labels).mean() + self.loss_fn(dists2, labels).mean()" }, { "identifier": "DecayingCosineAnnealingWarmRestarts", "path": "thirdparty/gangealing/annealing.py", "snippet": "class DecayingCosineAnnealingWarmRestarts(_LRScheduler):\n r\"\"\"Set the learning rate of each parameter group using a cosine annealing\n schedule, where :math:`\\eta_{max}` is set to the initial lr,\n :math:`T_{cur}` is the number of epochs since the last restart and\n :math:`T_{i}` is the number of epochs between two warm restarts in SGDR:\n .. math::\n \\eta_t = \\eta_{min} + \\frac{1}{2}(\\eta_{max} - \\eta_{min})\\left(1 +\n \\cos\\left(\\frac{T_{cur}}{T_{i}}\\pi\\right)\\right)\n When :math:`T_{cur}=T_{i}`, set :math:`\\eta_t = \\eta_{min}`.\n When :math:`T_{cur}=0` after restart, set :math:`\\eta_t=\\eta_{max}`.\n It has been proposed in\n `SGDR: Stochastic Gradient Descent with Warm Restarts`_.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n T_0 (int): Number of iterations for the first restart.\n T_mult (int, optional): A factor increases :math:`T_{i}` after a\n restart. Default: 1.\n eta_min (float, optional): Minimum learning rate. Default: 0.\n last_epoch (int, optional): The index of last epoch. Default: -1.\n .. 
_SGDR\\: Stochastic Gradient Descent with Warm Restarts:\n https://arxiv.org/abs/1608.03983\n \"\"\"\n\n def __init__(self, optimizer, T_0, decay=0.9, T_mult=1, eta_min=0,\n last_epoch=-1):\n if T_0 <= 0 or not isinstance(T_0, int):\n raise ValueError(f\"Expected positive integer T_0, but got {T_0}\")\n if T_mult < 1 or not isinstance(T_mult, int):\n raise ValueError(f\"Expected integer T_mult >= 1, but got {T_mult}\")\n self.T_0 = T_0\n self.T_i = T_0\n self.T_mult = T_mult\n self.eta_min = eta_min\n self.decay = decay\n self.cur_decay = 1.0\n\n super(DecayingCosineAnnealingWarmRestarts, self).__init__(optimizer,\n last_epoch)\n\n self.T_cur = self.last_epoch\n\n def get_lr(self):\n if not self._get_lr_called_within_step:\n warnings.warn(\"To get the last learning rate computed by the \"\n \"scheduler, use `get_last_lr()`.\", UserWarning)\n\n return [self.cur_decay * (self.eta_min + (base_lr - self.eta_min) *\n (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2)\n for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n \"\"\"Step could be called after every batch update\"\"\"\n\n if epoch is None and self.last_epoch < 0:\n epoch = 0\n\n if epoch is None:\n epoch = self.last_epoch + 1\n self.T_cur = self.T_cur + 1\n if self.T_cur >= self.T_i:\n self.T_cur = self.T_cur - self.T_i\n self.T_i = self.T_i * self.T_mult\n else:\n if epoch < 0:\n raise ValueError(f\"Expected non-negative epoch, got {epoch}\")\n if epoch >= self.T_0:\n if self.T_mult == 1:\n self.T_cur = epoch % self.T_0\n n = int(epoch // self.T_0)\n else:\n n = int(math.log((epoch / self.T_0 * (self.T_mult - 1)\n + 1), self.T_mult))\n self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / \\\n (self.T_mult - 1)\n self.T_i = self.T_0 * self.T_mult ** (n)\n else:\n self.T_i = self.T_0\n self.T_cur = epoch\n n = 0\n self.cur_decay = self.decay ** n\n self.last_epoch = math.floor(epoch)\n\n class _enable_get_lr_call:\n\n def __init__(self, o):\n self.o = o\n\n def __enter__(self):\n self.o._get_lr_called_within_step = True\n return self\n\n def __exit__(self, type, value, traceback):\n self.o._get_lr_called_within_step = False\n return self\n\n with _enable_get_lr_call(self):\n for param_group, lr in zip(self.optimizer.param_groups,\n self.get_lr()):\n param_group['lr'] = lr\n\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]" }, { "identifier": "lr_cycle_iters", "path": "thirdparty/gangealing/annealing.py", "snippet": "def lr_cycle_iters(anneal_psi, period, iter, tm):\n zero_lr_iters = [anneal_psi - 1]\n num_cycles = int(math.log((iter - anneal_psi) / period, tm))\n for n in range(num_cycles):\n step = zero_lr_iters[-1] + period * tm ** n\n zero_lr_iters.append(int(step))\n print(f'Learning Rate Cycles: {zero_lr_iters}')\n return zero_lr_iters" } ]
import argparse import torch import numpy as np import json import os import torch.nn.functional as F import wandb from torch import nn, optim from tqdm import tqdm from pathlib import Path from commons.logger import Logger, log_visuals from commons.distributed import get_rank, setup_distributed, reduce_loss_dict,\ get_world_size, primary from commons.utils import sample_tuples from datasets.cub import CUBDataset from datasets.in_memory import InMemoryDataset from datasets.spair import SpairDataset from datasets.utils import Augmentor from models.utils import accumulate, requires_grad from models.canonical import Canonical, CanonicalMLP from models.asic import Asic from losses.reg_losses import total_variation_loss from thirdparty.lpips.lpips import get_perceptual_loss from losses.matching_losses import LossCorrsSparse from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts,\ lr_cycle_iters
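The imports above pair each Adam optimizer with the DecayingCosineAnnealingWarmRestarts scheduler from the context list. A minimal usage sketch, with illustrative hyperparameters and assuming the module path from the snippet is importable:

import torch
from torch import optim
from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts

model = torch.nn.Linear(4, 4)
opt = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
# T_0=1 with a T_mult/decay pair mirrors the t_sched setup in the cropped code below
sched = DecayingCosineAnnealingWarmRestarts(opt, T_0=1, T_mult=2, decay=0.9)
for epoch in range(4):
    opt.step()          # no-op here (no gradients), shown only for ordering
    sched.step(epoch)   # learning rate anneals, restarts, and decays per cycle
    print(sched.get_last_lr())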
17268
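Likewise, total_variation_loss from the context list penalizes neighbor differences of the residual flow with a Huber-style distance. A shape-level sketch of the same computation; the random tensor only demonstrates the expected (N, H, W, 2) layout:

import torch

def tv_loss(delta_flow: torch.Tensor) -> torch.Tensor:
    # Huber-style distance on vertical and horizontal neighbor differences,
    # mirroring losses/reg_losses.py above (batch-reduced variant)
    dist = lambda a: torch.where(a <= 1.0, 0.5 * a.pow(2), a - 0.5).mean()
    diff_y = dist((delta_flow[:, :-1, :, :] - delta_flow[:, 1:, :, :]).abs())
    diff_x = dist((delta_flow[:, :, :-1, :] - delta_flow[:, :, 1:, :]).abs())
    return diff_x + diff_y

delta_flow = torch.randn(2, 8, 8, 2)  # (N, H, W, 2) residual flow
print(tv_loss(delta_flow))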
if args.flow_ssl:
    # in_size = extractor.num_patches
    # in_ch = extractor.feat_dim
    # TODO: read from the file and modify accordingly
    raise NotImplementedError
else:
    in_size = args.img_size
    in_ch = 3
stn = Asic(
    in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear,
    padding_mode=args.padding_mode, use_tanh=args.use_tanh).to(device)
if args.stn_ema:
    t_ema = Asic(
        in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear,
        padding_mode=args.padding_mode).to(device)
    accumulate(t_ema, stn, 0)
else:
    t_ema = stn

if args.mask_weight > 0:
    num_ch = 4
else:
    num_ch = 3

if args.use_mlp:
    canon = CanonicalMLP(
        input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim,
        skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers,
        resolution=args.canon_size).to(device)
else:
    canon = Canonical((1, num_ch, args.canon_size, args.canon_size),
                      clamp=args.clamp).to(device)

if args.canon_ema:
    if args.use_mlp:
        c_ema = CanonicalMLP(
            input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim,
            skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers,
            resolution=args.canon_size).to(device)
    else:
        c_ema = Canonical((1, num_ch, args.canon_size, args.canon_size),
                          clamp=args.clamp).to(device)
    accumulate(c_ema, canon, 0)
else:
    c_ema = canon

# Setup the perceptual loss function:
loss_fn = get_perceptual_loss(args.loss_fn, device)
if args.nbb_weight > 0.:
    nbb_loss_fn = LossCorrsSparse(flow_size=in_size, T=args.sparse_temp)
    nbb_loss_fn = nbb_loss_fn.to(device)
else:
    nbb_loss_fn = None

if args.canon_lr == 0:
    requires_grad(canon, False)
    canon_optim = None
    canon_sched = None
else:
    canon_optim = optim.Adam(canon.parameters(), lr=args.canon_lr,
                             betas=(0.9, 0.999), eps=1e-8)
    canon_sched = DecayingCosineAnnealingWarmRestarts(
        canon_optim, T_0=1, T_mult=args.tm, decay=args.decay)

if primary():
    print(f"{count_parameters(stn)} parameters in STN")
    print(f"{count_parameters(canon)} parameters in Canonical")

# Setup optimizers and learning rate schedulers:
t_optim = optim.Adam(stn.parameters(), lr=args.stn_lr,
                     betas=(0.9, 0.999), eps=1e-8)
t_sched = DecayingCosineAnnealingWarmRestarts(
    t_optim, T_0=1, T_mult=args.tm, decay=args.decay)

# See if the start iteration can be recovered when resuming training:
args.start_iter = 0

# Load pre-trained generator (and optionally resume from a GANgealing checkpoint):
ckpt_path = Path(args.results) / args.exp_name / 'checkpoint.pt'
try:
    print(f"Loading model from {ckpt_path}")
    ckpt = torch.load(ckpt_path)
    canon.load_state_dict(ckpt["canon"])
    c_ema.load_state_dict(ckpt["c_ema"])
    stn.load_state_dict(ckpt["t"])
    t_ema.load_state_dict(ckpt["t_ema"])
    t_optim.load_state_dict(ckpt["t_optim"])
    t_sched.load_state_dict(ckpt["t_sched"])
    if canon_optim is not None:
        canon_optim.load_state_dict(ckpt["canon_optim"])
    if canon_sched is not None:
        canon_sched.load_state_dict(ckpt["canon_sched"])
    args.start_iter = ckpt['iter']
    print(f"Checkpoint found. Resuming from {args.start_iter} iterations")
except FileNotFoundError:
    print("No checkpoint found. Training from scratch.")
except KeyError:
    raise Exception

# Move models to DDP if distributed training is enabled:
if args.distributed:
    local_rank = int(os.environ["LOCAL_RANK"])
    stn = nn.parallel.DistributedDataParallel(
        stn, device_ids=[local_rank], output_device=local_rank,
        broadcast_buffers=False)
    canon = nn.parallel.DistributedDataParallel(
        canon, device_ids=[local_rank], output_device=local_rank,
        broadcast_buffers=False)

# Setup data
if args.dset.lower() == 'folder':
    interim_dir = Path(args.img_dir).stem
    flow_dir = Path(args.flow_dir) / interim_dir / f'{args.bb}_s{args.bb_stride}'
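Note on the EMA bookkeeping above: accumulate(t_ema, stn, 0) initialises the EMA copy by cloning the live weights, and the training loop later re-invokes the same helper with a decay close to 1. The real implementation lives in models.utils and is not part of this record; the following is a minimal sketch of the conventional EMA update such a helper usually performs, written purely as an illustration:

import torch

def accumulate_sketch(model_ema, model, decay=0.999):
    # Hypothetical stand-in for models.utils.accumulate.
    # decay=0 copies `model` into `model_ema` outright; a decay near 1
    # (the training loop uses 0.5 ** (32 / 10000) ~= 0.9978) tracks a
    # slow exponential moving average of the training weights.
    ema_params = dict(model_ema.named_parameters())
    with torch.no_grad():
        for name, param in model.named_parameters():
            # ema <- decay * ema + (1 - decay) * current
            ema_params[name].mul_(decay).add_(param, alpha=1 - decay)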
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def base_training_argparse(): parser = argparse.ArgumentParser(description="Training") # Main training arguments: parser.add_argument("--exp-name", type=str, required=True, help="Name for experiment run (used for logging)") parser.add_argument("--results", type=str, default='logs', help='path to the results directory') parser.add_argument("--seed", default=0, type=int, help='Random seed for this experiment') parser.add_argument("--dset", type=str, default='cub', choices=["cub", "spair"]) parser.add_argument("--img_dir", type=str, required=True, help="Path to real data") parser.add_argument("--flow_dir", type=str, default='processed_data', help="Path to preprocessed flows") parser.add_argument("--mask_threshold", type=int, default=1, help="Threshold for masking") parser.add_argument("--mask_bbox_pad", type=int, default=4, help="Crop with some padding") parser.add_argument("--img_size", default=256, type=int, help='resolution of real images') parser.add_argument("--iter", type=int, default=20000, help="total training iterations") parser.add_argument("--batch", type=int, default=20, help="batch size per-GPU") parser.add_argument("--num_workers", type=int, default=2, help="num workers for dataloader") # Dataset hyperparameters: parser.add_argument("--cub_idx", type=int, default=1, help="cub category") parser.add_argument("--split", default='test', choices=['test', 'val'], help='splits for training and validation') parser.add_argument("--use_coseg_masks", action='store_true') parser.add_argument("--num_parts", default=4, type=int) parser.add_argument("--spair_cat", default='cat', help="cub category") # Loss hyperparameters: parser.add_argument("--loss_fn", type=str, default='vgg_ssl', choices=['lpips', 'vgg_ssl'], help="The perceptual loss to use.") parser.add_argument("--rec_weight", type=float, default=1., help='weight for reconstruction loss') parser.add_argument("--nbb_weight", type=float, default=30., help='weight for nbb loss') parser.add_argument("--flow_tv_weight", default=15000.0, type=float, help="""Loss weighting of the Total Variation smoothness regularizer on the residual flow""") parser.add_argument("--equi_weight", default=1.0, type=float, help='Loss weighting for equivariance') parser.add_argument("--sparse_topk", type=int, default=None, help='number of sparse correspondences for loss') parser.add_argument("--sparse_temp", type=float, default=1, help='temperature for sparse loss') parser.add_argument("--mask_weight", default=0.1, type=float, help="""Loss weighting of the mask""") parser.add_argument("--parts_weight", default=10.0, type=float, help="""Loss weighting of the Parts Mask""") parser.add_argument("--use_nbb_parts", action='store_true') # Augmentation hyperparameters parser.add_argument("--jitter", default=[0.4, 
0.4, 0.2, 0.1], type=float, nargs='+', help='augmentation mode') parser.add_argument("--jitter_prob", default=0.8, type=float) parser.add_argument("--gray_prob", default=0.2, type=float) parser.add_argument("--solar_prob", default=0.2, type=float) parser.add_argument("--tps_scale", default=0.4, type=float) # Canonical space parser.add_argument("--unwarp_size", type=int, default=128, help="resolution for unwarping") # Learned Grid hyperparameters parser.add_argument("--canon_size", type=int, default=256, help="resolution of canonical space") parser.add_argument("--clamp", action='store_true', help="clamp values of canonical space (-1, 1)") # MLP Hyperparams parser.add_argument("--use_mlp", action='store_true') parser.add_argument("--mlp_hidden_dim", type=int, default=256, help="number of hidden units per layer") parser.add_argument("--mlp_num_layers", type=int, default=8, help="number of layers") parser.add_argument("--mlp_skip_layers", type=int, nargs='+', default=[4, 7], help="skip layers") # Model hyperparameters: parser.add_argument("--canon_lr", type=float, default=0.003, help="base learning rate of canonical space") parser.add_argument("--canon_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_lr", type=float, default=0.003, help="base learning rate of SpatialTransformer") parser.add_argument("--flow_ssl", action='store_true', help="""If specified, apply STN on SSL features)""") parser.add_argument("--channel_multiplier", default=0.5, type=float, help='channel multiplier for smaller models') parser.add_argument("--bilinear", action='store_true', help='Apply bilinear upsample/downsample') parser.add_argument("--padding_mode", default='border', choices=['border', 'zeros', 'reflection'], type=str, help="""Padding algorithm for when the STN samples beyond image boundaries""") parser.add_argument("--use_tanh", action='store_true', help='Use tanh activation at the flow output') parser.add_argument("--disable_tps", action='store_true', help='disable tps transformations') # Backbone parameters parser.add_argument("--bb", default='dino_vits8', choices=['dino_vits8', 'dino_vits16', 'dino_vitb8', 'dino_vitb16', 'vit_small_patch8_224', 'vit_small_patch16_224', 'vit_base_patch16_224'], help='backbone models') parser.add_argument('--bb_stride', default=2, type=int, help="stride.") # Visualization hyperparameters: parser.add_argument("--vis_every", type=int, default=500, help="""frequency with which visualizations are generated during training""") parser.add_argument("--vis_denseres", type=int, default=32, help='number of sparse correspondences to visualize') parser.add_argument("--ckpt_every", type=int, default=10000, help='frequency of checkpointing during training') parser.add_argument("--log_every", default=25, type=int, help='How frequently to log data to TensorBoard') parser.add_argument("--n_sample", type=int, default=4, help="""number of images (real and fake) to generate visuals for""") parser.add_argument("--disable_wandb", action='store_true', help='Disable wandb for debugging') # Learning Rate scheduler hyperparameters: parser.add_argument("--period", default=10000, type=float, help="""Period for cosine learning rate scheduler (measured in gradient steps)""") parser.add_argument("--decay", default=0.9, type=float, help="""Decay factor for the cosine learning rate scheduler""") parser.add_argument("--tm", default=2, type=int, help="""Period multiplier for 
the cosine learning rate scheduler""") return parser def train(args, train_dset, canon, stn, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, loss_fn, nbb_loss_fn, device, writer): # Record modules to make saving checkpoints easier: if args.distributed: t_module = stn.module c_module = canon.module else: t_module = stn c_module = canon # Initialize Spatial Transformation Generator (Thin Plate Spline) aug = Augmentor(jitter=args.jitter, jitter_prob=args.jitter_prob, gray_prob=args.gray_prob, solar_prob=args.solar_prob, tps_scale=args.tps_scale).to(device) # A model checkpoint will be saved whenever the learning rate is zero: zero_lr_iters = lr_cycle_iters(0, args.period, args.iter, args.tm) early_ckpt_iters = set(zero_lr_iters) early_vis_iters = {100} early_vis_iters.update(early_ckpt_iters) # Initialize various training variables and constants: rec_loss = torch.tensor(0.0, device='cuda') flow_tv_loss = torch.tensor(0.0, device='cuda') nbb_loss = torch.tensor(0.0, device='cuda') equi_loss = torch.tensor(0.0, device='cuda') mask_loss = torch.tensor(0.0, device='cuda') parts_loss = torch.tensor(0.0, device='cuda') accum = 0.5 ** (32 / (10 * 1000)) # Resize function for perceptual loss if args.unwarp_size != args.img_size: scale_factor = args.unwarp_size / args.img_size resize_fn = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True) else: resize_fn = nn.Identity() # Pre-load on GPU # Assuming ~30 images of size 256x256, takes up ~23 MB device memory has_gt_kp = train_dset.kps is not None all_imgs = train_dset.imgs = train_dset.imgs.to(device) # / 127.5 - 1.0 all_masks = train_dset.masks = train_dset.masks.unsqueeze(1).to(device) all_parts = train_dset.parts = train_dset.parts.to(device) if has_gt_kp: all_kps = train_dset.kps = train_dset.kps.to(device) # Pseudo GT pseudo_kps = train_dset.pseudo_kps = torch.from_numpy(train_dset.pseudo_kps).to(device) num_parts = train_dset.num_parts loss_topk = pseudo_kps.shape[2] if args.sparse_topk is None else min(args.sparse_topk, pseudo_kps.shape[2]) # Progress bar for monitoring training: pbar = range(args.start_iter, args.iter) if primary(): pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.2) pck_pairs, pck_cycles = log_visuals( c_ema, t_ema, train_dset, 0, writer, vis_sample=args.n_sample, vis_denseres=args.vis_denseres) best_pck_pairs = pck_pairs best_pck_cycles = pck_cycles requires_grad(stn, True) requires_grad(canon, True) for idx in pbar: # main training loop i = idx + args.start_iter + 1 #################################### # TRAIN STN and CANON # #################################### N = args.batch pairs = sample_tuples(len(train_dset), count=N // 2) src_idx, trg_idx = pairs[:, 0], pairs[:, 1] all_idx = np.concatenate([src_idx, trg_idx]) batch_imgs = all_imgs[all_idx] batch_parts = all_parts[all_idx] if args.use_nbb_parts: batch_masks = (batch_parts != num_parts).unsqueeze(1).float() batch_masks_resized = resize_fn(batch_masks) else: batch_masks = all_masks[all_idx] batch_masks_resized = resize_fn(batch_masks) kp1 = pseudo_kps[src_idx, trg_idx][:, :loss_topk] # (N/2, K, 4) kp2 = pseudo_kps[trg_idx, src_idx][:, :loss_topk] # (N/2, K, 4) batch_kps_vis = kp1[..., 2] > 0 # (N/2, K) batch_kps_wt = torch.ones_like(batch_kps_vis).float() # (N/2, K) batch_kps = torch.cat([kp1, kp2])[..., :2] # (N, K, 2) if args.use_nbb_parts: nbb_parts_vis = (kp1[..., 3] != args.num_parts) * (kp2[..., 3] != args.num_parts) batch_kps_wt *= nbb_parts_vis # Map the images to the canonical space flow, delta_flow = 
stn(batch_imgs) unwarped = canon.unwarp(flow, args.unwarp_size) # NBB weight if args.nbb_weight > 0.: nbb_loss = nbb_loss_fn(flow[:N//2], flow[N//2:], batch_kps[:N//2], batch_kps[N//2:], batch_kps_vis, batch_kps_wt) if args.equi_weight > 0.: # Apply tps transformations if args.disable_tps: batch_imgs_t = aug.forward_geom(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_geom(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_geom(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) else: batch_imgs_t = aug.forward_tps(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_tps(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_tps(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) batch_masks_t = torch.where(batch_masks_t > 0.5, 1., 0.) batch_masks_t_resized = resize_fn(batch_masks_t) vis = batch_masks_t * batch_masks # Flow of tps image flow_ft, _ = stn(batch_imgs_t) unwarped_ft = canon.unwarp(flow_ft, args.unwarp_size) equi_loss = F.l1_loss(flow_ft, flow_tf.detach(), reduction='none') \ + F.l1_loss(flow_tf, flow_ft.detach(), reduction='none') equi_loss = (equi_loss * vis.squeeze(1).unsqueeze(-1)).mean() if args.mask_weight > 0: unwarped_mask = unwarped[:, [3]] mask_loss = F.binary_cross_entropy_with_logits(unwarped_mask, batch_masks_resized) if args.equi_weight > 0.: unwarped_ft_mask = unwarped_ft[:, [3]] mask_loss = 0.5 * mask_loss + \ 0.5 * F.binary_cross_entropy_with_logits( unwarped_ft_mask, batch_masks_t_resized) # Get Total Variation Loss on flow if args.flow_tv_weight > 0: flow_tv_loss = total_variation_loss(delta_flow) # Reconstruction loss if args.rec_weight > 0: unwarped = unwarped * batch_masks_resized resized_img = resize_fn(batch_imgs) * batch_masks_resized rec_loss = loss_fn(unwarped[:, :3], resized_img).mean() if args.equi_weight > 0.: unwarped_ft = unwarped_ft * batch_masks_t_resized resized_img = resize_fn(batch_imgs_t) * batch_masks_t_resized rec_loss = 0.5*rec_loss + 0.5 * loss_fn(unwarped_ft[:, :3], resized_img).mean() # Parts Loss if args.parts_weight > 0.: # Calculate the centroid of each part part_centroids = torch.zeros(num_parts+1, 2, dtype=torch.float, device=device) part_centroids.index_add_(0, batch_parts.reshape(-1), flow.reshape(-1, 2)) part_counts = torch.bincount(batch_parts.reshape(-1)).float() part_centroids = (part_centroids/part_counts.unsqueeze(-1)).detach() # Compute the loss as the distance of the centroid from the flows parts_loss = F.l1_loss(flow, part_centroids[batch_parts], reduction='none') parts_loss = (parts_loss * batch_masks.squeeze(1).unsqueeze(-1)).mean() loss_dict = {"p": rec_loss, "ftv": flow_tv_loss, "nbb": nbb_loss, "equi": equi_loss, "mask": mask_loss, 'parts': parts_loss} canon.zero_grad() stn.zero_grad() full_stn_loss = args.rec_weight * rec_loss + \ args.flow_tv_weight * flow_tv_loss + \ args.nbb_weight * nbb_loss + args.equi_weight * equi_loss + \ args.mask_weight * mask_loss + args.parts_weight * parts_loss full_stn_loss.backward() t_optim.step() epoch = max(0, i / args.period) t_sched.step(epoch) if args.canon_lr > 0: canon_optim.step() canon_sched.step(epoch) if args.stn_ema: accumulate(t_ema, t_module, accum) if args.canon_ema: accumulate(c_ema, c_module, accum) # Aggregate loss information across GPUs loss_reduced = reduce_loss_dict(loss_dict) if primary(): # Display losses on the progress bar: perceptual_loss_val = loss_reduced["p"].mean().item() flow_tv_loss_val = loss_reduced["ftv"].mean().item() nbb_loss_val = loss_reduced["nbb"].mean().item() equi_loss_val = 
loss_reduced["equi"].mean().item() mask_loss_val = loss_reduced["mask"].mean().item() parts_loss_val = loss_reduced["parts"].mean().item() p_str = f"rec: {perceptual_loss_val:.4f}; " \ if args.rec_weight > 0 else "" ftv_str = f"ftv: {flow_tv_loss_val:.6f}; " \ if args.flow_tv_weight > 0 else "" nbb_str = f"nbb: {nbb_loss_val:.6f}; " \ if args.nbb_weight > 0 else "" equi_str = f"equi: {equi_loss_val:.6f}; " \ if args.equi_weight > 0 else "" mask_str = f"mask: {mask_loss_val:.6f}; " \ if args.mask_weight > 0 else "" parts_str = f"parts: {parts_loss_val:.6f}; " \ if args.parts_weight > 0 else "" pbar.set_description( f"{p_str}{nbb_str}{equi_str}{mask_str}{ftv_str}{parts_str}") # Log losses and others metrics to TensorBoard: if i % args.log_every == 0 or i in early_ckpt_iters or i == 1: writer.add_scalars('', { 'Loss/Full': full_stn_loss.item(), 'Loss/Reconstruction': perceptual_loss_val, 'Loss/TotalVariation': flow_tv_loss_val, 'Loss/NBB': nbb_loss_val, 'Loss/Equi': equi_loss_val, 'Loss/Mask': mask_loss_val, 'Loss/Parts': parts_loss_val, 'Progress/STN_LearningRate': t_sched.get_last_lr()[0], 'Progress/Canon_LearningRate': canon_sched.get_last_lr()[0] if args.canon_lr > 0 else 0. }, i) if (i % args.ckpt_every == 0 or i in early_ckpt_iters): save_state_dict( 'checkpoint', c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, i, True) if i % args.vis_every == 0 or i in early_vis_iters or i == 1: # Save visualizations to Tens orBoard if i in early_ckpt_iters: pbar.write(f'{i:07}: LR = {t_sched.get_last_lr()[0]}') pck_pairs, pck_cycles = log_visuals( c_ema, t_ema, train_dset, i, writer, vis_sample=args.n_sample, vis_denseres=args.vis_denseres) if has_gt_kp and best_pck_cycles[2][0] < pck_cycles[2][0]: best_pck_pairs = pck_pairs for k, pck_cycle in enumerate(pck_cycles): best_pck_cycles[k] = pck_cycle save_state_dict( 'best', c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, i) pck_summary = {} if has_gt_kp: pck_summary.update({ 'Progress/[email protected]': pck_pairs[0] * 100, 'Progress/[email protected]': pck_pairs[-1] * 100, 'Progress/[email protected]': best_pck_pairs[0] * 100, 'Progress/[email protected]': best_pck_pairs[-1] * 100, }) for k, pck_cycle in enumerate(pck_cycles): pck_summary[f'Progress/{k+2}[email protected]'] = pck_cycle[0] * 100 pck_summary[f'Progress/{k+2}[email protected]'] = pck_cycle[-1] * 100 if has_gt_kp: pck_summary[f'Progress/Best{k+2}[email protected]'] = best_pck_cycles[k][0] * 100 pck_summary[f'Progress/Best{k+2}[email protected]'] = best_pck_cycles[k][-1] * 100 writer.add_scalars('', pck_summary, i) if __name__ == "__main__": device = "cuda" parser = base_training_argparse() args = parser.parse_args() # Setup distributed PyTorch and create results directory: args.distributed = setup_distributed() results_path = os.path.join(args.results, args.exp_name) if primary(): # exp_id = hashlib.md5(args.exp_name.encode('utf-8')).hexdigest() use_wandb = not args.disable_wandb if use_wandb: wandb.init(project="asic", entity="kampta", name=args.exp_name, reinit=True) wandb.config.update(args) writer = Logger(results_path, log_to_wandb=use_wandb) with open(f'{results_path}/opt.txt', 'w') as f: json.dump(args.__dict__, f, indent=2) else: writer = None # Seed RNG: torch.manual_seed(args.seed * get_world_size() + get_rank()) np.random.seed(args.seed * get_world_size() + get_rank()) # UNet output is same size as input by default # When input are SSL features, we want to upsample # the flow when loss is computed in the 
image space # not upsammple the flow when loss is computed in the # SSL featuremap space # Initialize U-Net for regressing flow if args.flow_ssl: # in_size = extractor.num_patches # in_ch = extractor.feat_dim # TODO: read from the file and modfiy accordingly raise NotImplementedError else: in_size = args.img_size in_ch = 3 stn = Asic( in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear, padding_mode=args.padding_mode, use_tanh=args.use_tanh).to(device) if args.stn_ema: t_ema = Asic( in_ch, in_size, mf=args.channel_multiplier, bilinear=args.bilinear, padding_mode=args.padding_mode).to(device) accumulate(t_ema, stn, 0) else: t_ema = stn if args.mask_weight > 0: num_ch = 4 else: num_ch = 3 if args.use_mlp: canon = CanonicalMLP( input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim, skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers, resolution=args.canon_size).to(device) else: canon = Canonical((1, num_ch, args.canon_size, args.canon_size), clamp=args.clamp).to(device) if args.canon_ema: if args.use_mlp: c_ema = CanonicalMLP( input_dim=2, output_dim=num_ch, hidden_dim=args.mlp_hidden_dim, skip_layers=args.mlp_skip_layers, num_layers=args.mlp_num_layers, resolution=args.canon_size).to(device) else: c_ema = Canonical((1, num_ch, args.canon_size, args.canon_size), clamp=args.clamp).to(device) accumulate(c_ema, canon, 0) else: c_ema = canon # Setup the perceptual loss function: loss_fn = get_perceptual_loss(args.loss_fn, device) if args.nbb_weight > 0.: nbb_loss_fn = LossCorrsSparse(flow_size=in_size, T=args.sparse_temp) nbb_loss_fn = nbb_loss_fn.to(device) else: nbb_loss_fn = None if args.canon_lr == 0: requires_grad(canon, False) canon_optim = None canon_sched = None else: canon_optim = optim.Adam(canon.parameters(), lr=args.canon_lr, betas=(0.9, 0.999), eps=1e-8) canon_sched = DecayingCosineAnnealingWarmRestarts( canon_optim, T_0=1, T_mult=args.tm, decay=args.decay) if primary(): print(f"{count_parameters(stn)} parameters in STN") print(f"{count_parameters(canon)} parameters in Canonical") # Setup optimizers and learning rate schedulers: t_optim = optim.Adam(stn.parameters(), lr=args.stn_lr, betas=(0.9, 0.999), eps=1e-8) t_sched = DecayingCosineAnnealingWarmRestarts( t_optim, T_0=1, T_mult=args.tm, decay=args.decay) # See if the start iteration can be recovered when resuming training: args.start_iter = 0 # Load pre-trained generator (and optionally resume from a GANgealing checkpoint): ckpt_path = Path(args.results) / args.exp_name / 'checkpoint.pt' try: print(f"Loading model from {ckpt_path}") ckpt = torch.load(ckpt_path) canon.load_state_dict(ckpt["canon"]) c_ema.load_state_dict(ckpt["c_ema"]) stn.load_state_dict(ckpt["t"]) t_ema.load_state_dict(ckpt["t_ema"]) t_optim.load_state_dict(ckpt["t_optim"]) t_sched.load_state_dict(ckpt["t_sched"]) if canon_optim is not None: canon_optim.load_state_dict(ckpt["canon_optim"]) if canon_optim is not None: canon_sched.load_state_dict(ckpt["canon_sched"]) args.start_iter = ckpt['iter'] print(f"Checkpoint found. Resuming from {args.start_iter} iterations") except FileNotFoundError: print("No checkpoint found. 
Training from scratch.") except KeyError: raise Exception # Move models to DDP if distributed training is enabled: if args.distributed: local_rank = int(os.environ["LOCAL_RANK"]) stn = nn.parallel.DistributedDataParallel( stn, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) canon = nn.parallel.DistributedDataParallel( canon, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False) # Setup data if args.dset.lower() == 'folder': interim_dir = Path(args.img_dir).stem flow_dir = Path(args.flow_dir) / interim_dir / f'{args.bb}_s{args.bb_stride}'
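Of the loss terms in train() above, the parts loss is the least self-explanatory: it scatter-adds every pixel's predicted flow into a per-part accumulator to form part centroids, then pulls each pixel's flow toward the centroid of its part. A self-contained sketch of that computation follows; the tensor shapes, the minlength argument, and the clamp_min guard are illustrative additions, not the repository's exact code:

import torch
import torch.nn.functional as F

num_parts = 4
flow = torch.randn(8, 64, 64, 2)                       # (N, H, W, 2) canonical coords
parts = torch.randint(0, num_parts + 1, (8, 64, 64))   # (N, H, W) integer part labels

# Scatter-add each pixel's flow into its part's accumulator, then divide
# by the per-part pixel count to obtain one centroid per part.
centroids = torch.zeros(num_parts + 1, 2)
centroids.index_add_(0, parts.reshape(-1), flow.reshape(-1, 2))
counts = torch.bincount(parts.reshape(-1), minlength=num_parts + 1).float()
centroids = (centroids / counts.clamp_min(1).unsqueeze(-1)).detach()

# Penalise the distance of every pixel's flow from its part centroid;
# advanced indexing broadcasts centroids back to shape (N, H, W, 2).
parts_loss = F.l1_loss(flow, centroids[parts], reduction='none').mean()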
train_dset = InMemoryDataset(
9
2023-11-14 16:43:16+00:00
24k
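That closes the first record. The fields suggest a repository-level next-line completion task: a model receives the retrieved context snippets, the import statement, and cropped_code, and is scored on reproducing next_line (here: train_dset = InMemoryDataset(), an open call continued in the source file), with gold_snippet_index identifying the context snippet that supports the completion. A minimal sketch of consuming one record under an assumed exact-match protocol, where generate_completion is a hypothetical stand-in for the model under test and the field names come from the schema:

def evaluate_record(record, generate_completion):
    # Assemble a prompt from the record's fields and compare the model's
    # single-line completion against the gold next_line by exact match.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    prediction = generate_completion(prompt).strip()
    return prediction == record["next_line"].strip()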
atlantic-quantum/Shipyard
tests/printers/visualizer/test_visualize_pulse_sequences.py
[ { "identifier": "CoreType", "path": "shipyard/awg_core/awg_core.py", "snippet": "class CoreType(Enum):\n \"\"\"Enumeration of AWG Core types\"\"\"\n\n HD = \"HD\"\n QA = \"QA\"\n SG = \"SG\"" }, { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n self.type = ar_type\n self.nesting_level = nesting_level\n self.members = {}\n\n def __setitem__(self, key, value):\n self.members[key] = value\n LOGGER.debug(\"%s: %s\", key, value)\n\n def __getitem__(self, key):\n return self.members[key]\n\n def get(self, key, default=None):\n \"\"\"Gets a member of the activation record by key\"\"\"\n return self.members.get(key, default)\n\n def __str__(self):\n lines = [f\"{self.nesting_level}: {self.type.value} {self.name}\"]\n for name, val in self.members.items():\n lines.append(f\" {name:<20}: {val}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()" }, { "identifier": "ARType", "path": "shipyard/call_stack.py", "snippet": "class ARType(Enum):\n \"\"\"\n Enumeration of Acivation Record Types\n \"\"\"\n\n PROGRAM = \"PROGRAM\"\n EXTERN = \"EXTERN\"\n SUBROUTINE = \"SUBROUTINE\"\n CALIBRATION = \"CALIBRATION\"\n DEFCAL = \"DEFCAL\"\n GATE = \"GATE\"\n LOOP = \"LOOP\"" }, { "identifier": "Compiler", "path": "shipyard/compiler.py", "snippet": "class Compiler:\n version = \"0.1.1\"\n \"\"\"\n Compiler to compile openQASM programs to target programs for different AWG Cores.\n Currently supports compilation to ZI SEQC cores.\n\n Args:\n program_path (Path):\n Path object pointing to a qasm program file.\n setup (Setup | Path):\n Path object pointing to a experiment setup json file.\n frames_from_setup (bool, optional):\n If True, frame definitions and port declarations are generated from setup.\n If False, frame definitions and port declarations should be written\n explicitly in the qasm program.\n Defaults to False to preserve original behavior.\n \"\"\"\n\n def __init__(\n self,\n program_path: Path,\n setup: Setup | Path,\n frames_from_setup: bool = False,\n ) -> None:\n self.program_path = program_path\n self.program = CopyTransformer().visit_Program(self.load_program(program_path))\n setup = setup if isinstance(setup, Setup) else Setup.from_file(setup)\n if frames_from_setup:\n self._frames_from_setup(setup)\n self.setup = setup.to_internal()\n self.split_programs: dict[tuple[str, int, str], ast.Program] = {}\n self.split_compiled: dict[tuple[str, int, str], str] = {}\n self.core_settings: dict[tuple[str, int, str], list[tuple[str], Any]] = {}\n self.wfm_mapping: dict[tuple[str, int, str], dict[int, str]] = {}\n\n @staticmethod\n @lru_cache()\n def load_program(path: Path) -> ast.Program:\n \"\"\"\n Loads a qasm program as an AST from a file\n\n Args:\n path (Path): path to the qasm program file\n\n Returns:\n ast.Program: qasm program as an AST\n \"\"\"\n with open(path, encoding=\"utf_8\") as qasm_file:\n qasm_code = qasm_file.read()\n return parse(qasm_code)\n\n def compile(\n self,\n inputs: dict = None,\n printer_kwargs: dict = None,\n waveforms: dict[str, ndarray] | None = None,\n command_tables: dict[tuple[str, int, str], CommandTable] | None = None,\n ):\n \"\"\"\n Compile a single openQASM program into multiple programs for each\n AWG core in the setup\n\n Args:\n inputs (dict, optional):\n Dictionary of input values for the program. 
Defaults to None.\n Used to resolve input declarations in the program.\n printer_kwargs (dict, optional):\n Dictionary of keyword arguments to pass to the printer.\n See the printer documentation for more details.\n \"\"\"\n ResolveIODeclaration(inputs).visit(self.program)\n IncludeAnalyzer(self.program_path).visit(self.program)\n IncludeWaveforms(waveforms).visit(self.program)\n SemanticAnalyzer().visit(self.program)\n DurationTransformer().visit(self.program)\n TimingConstraints(self.setup, external_zi_function_dict()).visit(self.program)\n max_delay_obj = DetermineMaxDelay(\n self.program, self.setup, external_zi_function_dict()\n )\n extractor_obj = ShotsExtractor()\n extractor_obj.visit(self.program)\n signature = extractor_obj.create_signature()\n printer_kwargs = printer_kwargs or {}\n for instr, core_index, core_type in self.setup.cores():\n if command_tables:\n command_table = command_tables.get((instr, core_index, core_type))\n else:\n command_table = None\n ports = ports_for_core(self.setup, instr, core_index)\n split_program = CoreSplitter(ports).visit_Program(self.program)\n LOGGER.debug(\n \"Split Program before removing unused, core: (%s, %i, %s):\",\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", LazyRepr(qasm_dumps, [split_program]))\n for repetition in [\"1st pass\", \"2nd pass\"]:\n RemoveUnused(split_program)\n LOGGER.debug(\n \"Split Program after removing unused (%s), core: (%s, %i, %s):\",\n repetition,\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", LazyRepr(qasm_dumps, [split_program]))\n self.split_programs[(instr, core_index, core_type)] = split_program\n # todo dynamically choose printer based on instrument type\n InsertCTWaveforms(command_table).visit(split_program)\n printer = SEQCPrinter(\n io.StringIO(),\n self.setup,\n signature,\n max_delay_obj.result(),\n **printer_kwargs\n )\n printer.visit(split_program)\n compiled = printer.stream.getvalue()\n LOGGER.debug(\n \"Compiled Program, core: core: (%s, %i, %s):\",\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", compiled)\n self.split_compiled[(instr, core_index, core_type)] = compiled\n self.core_settings[(instr, core_index, core_type)] = printer.settings()\n self.wfm_mapping[(instr, core_index, core_type)] = printer.wfm_mapping()\n\n @lru_cache()\n @staticmethod\n def cached_compile(\n program_path: Path,\n setup: Setup | Path,\n inputs: dict | None = None,\n printer_kwargs: dict | None = None,\n frames_from_setup: bool = False,\n ) -> \"Compiler\":\n \"\"\"Method to compile a program and cache the result.\n\n Args:\n program_path (Path):\n path to the qasm program file\n setup (Setup | Path):\n path to the laboratory setup file\n inputs (dict | None, optional):\n dictionary of input values for the program,\n used to resolve input declarations. Defaults to None.\n printer_kwargs (dict | None, optional):\n Dictionary of kwarg arguments to pass to the printer,\n see printer documentation for details. 
Defaults to None.\n frames_from_setup (bool, optional):\n If True, frame definitions and port declarations are generated from\n setup.\n If False, frame definitions and port declarations should be written\n explicitly in the qasm program.\n Defaults to False to preserve original behavior.\n\n Returns:\n Compiler: cached compiler object\n \"\"\"\n compiler = Compiler(program_path, setup, frames_from_setup)\n compiler.compile(inputs, printer_kwargs)\n return compiler\n\n def _frames_from_setup(self, setup: Setup) -> None:\n \"\"\"\n inserts a calibrationStatement after the defcalgrammar statement, the\n calibrationStatement created from the setup file\n\n Args:\n setup_path (Path): path to the setup file\n\n Raises:\n ValueError: if no calibration grammar is defined in the program\n ValueError: if the calibration grammar is not openpulse\n \"\"\"\n # make sure defcalgrammar has been define before inserting setup\n for i, statement in enumerate(self.program.statements):\n if isinstance(statement, ast.CalibrationGrammarDeclaration):\n break\n else:\n raise ValueError(\n \"No calibration grammar defined in program, cannot insert setup.\"\n )\n # make sure defcalgrammar is openpulse\n if statement.name != \"openpulse\":\n raise ValueError(\"calibration grammar be 'openpulse', \")\n # insert cal from setup after defcalgrammar statement\n self.program.statements.insert(i + 1, setup.get_qasm())" }, { "identifier": "Duration", "path": "shipyard/duration.py", "snippet": "class Duration(BaseModel):\n \"\"\"\n pydantic model for managing times/durations in openQASM programs\n\n Durations have both time and unit (ns, us, ms, s) (and dt which represents sample\n time at 2GS/s)\n\n Durations can be added to other Durations or numbers (int, float), they can also\n be compared to one another or to numbers (int, float)\n\n the native max/min python operations work with lists of Durations.\n\n The unit of a Duration can be changed using the 'set_unit' method.\n \"\"\"\n\n # todo consider rounding to nearest ps/fs to avoid floating point errors.\n time: float\n unit: TimeUnits = TimeUnits.dt\n\n def set_unit(self, unit: TimeUnits):\n \"\"\"\n Changes the unit of the Duration and updates the time to be represented in the\n new unit.\n\n Example:\n dur = Duration(time=100, unit=TimeUnits.ns)\n dur.set_unit(TimeUnits.us)\n\n # dur -> Duration(time=0.1, unit=TimeUnits.us)\n \"\"\"\n self.time = self.time * self.unit.value / unit.value\n self.unit = unit\n\n def _real_time(self) -> float:\n \"\"\"Calculates the time in seconds\n\n Returns:\n float: time in seconds\n \"\"\"\n return self.time * self.unit.value\n\n def __add__(self, other): # (self, other: Self) -> Self\n \"\"\"\n Adds Durations together or a number to a Duration\n\n Example (two Durations):\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n dur3 = dur1 + dur2 # dur3 -> Duration(time=101, unit=TimeUnits.ns)\n dur4 = dur2 + dur1 # dur3 -> Duration(time=0.101, unit=TimeUnits.us)\n\n Example (Duration and int or float):\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = dur1 + 10e-9 # dur2 -> Duration(time=11, unit.TimeUnits.ns)\n\n Args:\n other (Duration | int | float): the Duration or number to add to this\n duration\n\n Raises:\n ValueError: if 'other' is not a Durration, int or float\n\n Returns:\n Duration: sum of this Duration and other\n \"\"\"\n if isinstance(other, Duration):\n return Duration(\n time=self.time + other.time * other.unit.value / self.unit.value,\n unit=self.unit,\n )\n if 
isinstance(other, (int, float)):\n return Duration(time=self.time + other / self.unit.value, unit=self.unit)\n raise ValueError(f\"'+' not supported between {type(self)} and {type(other)}\")\n\n def __radd__(self, other):\n \"\"\"\n right addition, allows Durations to be added to numbers\n addition of Durations is complimentary\n\n Args:\n other (int | float): number Duration is being added to\n\n Returns:\n Duration: sum of this Duration and other\n \"\"\"\n return self.__add__(other)\n\n def __str__(self) -> str:\n \"\"\"\n Formats how Durations are printed\n Example:\n dur = Duration(time=16, unit=TimeUnits.ns)\n print(dur) -> '16 ns'\n\n Returns:\n str: formated string representation of Duration\n \"\"\"\n return f\"{self.time} {self.unit.name}\"\n\n def __lt__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is lower than another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 < dur2 -> True\n dur < 2 -> False\n dur < 0.1 -> False\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is lower than other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() < other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() < other\n raise ValueError(f\"'<' not supported between {type(self)} and {type(other)}\")\n\n def __gt__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is greater than another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 > dur2 -> False\n dur > 2 -> False\n dur > 0.1e-9 -> True\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is greater than other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() > other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() > other\n raise ValueError(f\"'>' not supported between {type(self)} and {type(other)}\")\n\n def __eq__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is equal to another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 == dur2 -> False\n dur1 == dur1 -> True\n dur == 1e-9 -> True\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is equal to other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() == other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() == other\n raise ValueError(f\"'==' not supported between {type(self)} and {type(other)}\")\n\n def __ne__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is not equal to another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 != dur2 -> True\n dur1 != dur1 -> False\n dur != 1e-9 -> False\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n 
ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is equal to other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() != other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() != other\n raise ValueError(f\"'!=' not supported between {type(self)} and {type(other)}\")" }, { "identifier": "TimeUnits", "path": "shipyard/duration.py", "snippet": "class TimeUnits(Enum):\n \"\"\"\n Enumerations of common time units\n ns, µs, us, ms, s\n\n and\n\n dt = 0.5e-9 <- timestep @ 2GS/s\n \"\"\"\n\n dt = 0.5e-9\n ns = 1e-9\n µs = 1e-6\n us = 1e-6\n ms = 1e-3\n s = 1" }, { "identifier": "DurationTransformer", "path": "shipyard/passes/duration_transformer.py", "snippet": "class DurationTransformer(GenericTransformer):\n \"\"\"\n QASM Transformer that transforms DurationLiterals to have units of samples (dt).\n\n Args:\n sample_rate (int):\n the sample rate that DurationLiterals will be transformed to.\n Default value = 2e9\n \"\"\"\n\n def __init__(self, sample_rate: int = 2e9) -> None:\n self.sample_rate = sample_rate\n super().__init__()\n\n # pylint: disable=C0103\n # snake_case naming style\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> ast.DurationLiteral:\n \"\"\"\n DurationLiteral node Transformer. Transforms DurationLiteral nodes from any\n unit to a node with sample units (dt).\n\n Example:\n in: node = ast.DurationLiteral(value=20, unit=ast.TimeUnit.ns)\n\n usage: DurationTransformer().visit(node)\n\n out: ast.DurationLiteral(value=40, unit=ast.TimeUnit.dt)\n\n\n Args:\n node (ast.DurationLiteral):\n DurationLiteral node to transform.\n\n Returns:\n ast.DurationLiteral:\n Tranformed DurationLiteral node with unit set to samples (dt)\n \"\"\"\n if node.unit.name != \"dt\":\n new_node = ast.DurationLiteral(\n value=int(\n round(\n node.value\n * TimeUnitToValue[node.unit.name].value\n * self.sample_rate\n )\n ),\n unit=ast.TimeUnit.dt,\n )\n return new_node\n return node\n\n # pylint: enable=C0103" }, { "identifier": "ResolveIODeclaration", "path": "shipyard/passes/resolve_io_declaration.py", "snippet": "class ResolveIODeclaration(GenericTransformer):\n def __init__(self, inputs: dict = None):\n self.inputs = inputs or {} # e.g. inputs = {\"basis\": 1}\n\n def visit_IODeclaration(self, node: ast.IODeclaration) -> ast.ConstantDeclaration:\n \"\"\"\n IODeclaration node Transformer. Transforms IODeclaration nodes to\n ConstantDeclarations. 
Searches through ResolveIODeclaration.inputs\n for info to populate the ConstantDeclaration.\n\n Args:\n node (ast.IODeclaration):\n IODeclaration node to transform.\n\n Returns:\n ast.ConstantDeclaration:\n Tranformed ConstantDeclaration node with relevant data (identifier and\n init_expression)\n \"\"\"\n if node.io_identifier == ast.IOKeyword.input:\n if node.identifier.name not in self.inputs:\n raise SetupError(\n ErrorCode.ID_NOT_FOUND,\n message=f\"Input: {node.identifier.name} not found in input\"\n \" dictionary\",\n )\n match node.type:\n case ast.IntType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.DurationType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.DurationLiteral(\n value=(self.inputs[node.identifier.name] * 1e9),\n unit=ast.TimeUnit.ns,\n ),\n )\n # todo: AQC-311 add support for complex input type\n # case ast.ComplexType():\n # return ast.ConstantDeclaration(\n # type=node.type,\n # identifier=node.identifier,\n # init_expression=ast.BinaryExpression(\n # op= ast.BinaryOperator['+'],\n # lhs=ast.FloatLiteral(\n # value= self.inputs[node.identifier.name].real),\n # rhs=ast.ImaginaryLiteral(\n # value= self.inputs[node.identifier.name].imag))\n # )\n case ast.FloatType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.FloatLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.BoolType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.BooleanLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.BitType():\n if isinstance(self.inputs[node.identifier.name], list):\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.ArrayLiteral(\n values=[\n ast.IntegerLiteral(value=s)\n for s in self.inputs[node.identifier.name]\n ]\n ),\n )\n elif isinstance(self.inputs[node.identifier.name], int):\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n else:\n raise SemanticError(\n ErrorCode.INPUT_TYPE_NOT_SUPPORTED,\n message=f\"Input type not supported: {node.type}\",\n )\n case ast.UintType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case _:\n raise SemanticError(\n ErrorCode.INPUT_TYPE_NOT_SUPPORTED,\n message=f\"Input type not supported: {node.type}\",\n )\n # case ast.ArrayType():\n # return ast.ConstantDeclaration(\n # type=node.type,\n # identifier=node.identifier,\n # init_expression=ast.ArrayLiteral(\n # values = [ast.IntegerLiteral(value=s)\n # for s in self.inputs[node.identifier.name]]),\n # )\n\n # todo: AQC-312 add support for angle input type\n # case ast.AngleType():\n # # return ast.ConstantDeclaration(\n # # type=node.type,\n # # identifier=node.identifier,\n # # init_expression=ast.FloatLiteral(\n # # value = self.inputs[node.identifier.name]),\n # # )\n # todo: AQC-310 add support for stretch input type\n # case ast.StretchType():\n else:\n raise SemanticError(\n ErrorCode.OUTPUT_NOT_SUPPORTED,\n message=f\"Output type not supported: {node}\",\n )" }, { "identifier": "SemanticAnalyzer", "path": 
"shipyard/passes/semantic_analysis/semantic_analyzer.py", "snippet": "class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor):\n \"\"\"\n QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree\n\n usage:\n qasm_ast = openpulse.parse(qasm_program_string)\n sa = SemanticAnalyser()\n sa.visit(qasm_ast)\n \"\"\"\n\n def __init__(self) -> None:\n self.current_scope: ScopedSymbolTable = None\n self._calibration_scope: CalScopedSymbolTable = None\n self._scope_context: ScopeContext = None\n super().__init__()\n\n @property\n def calibration_scope(self) -> CalScopedSymbolTable:\n \"\"\"Getter for the 'calibration_scope' symbol table of a SemanticAnalyser\n instance. Creates and returns an initialised calibration scope on first call.\n Subsequent calls return the same scope.\n\n Returns:\n CalScopedSymbolTable: a scoped symbol table used for symbols declared within\n openpulse syntax (cal & defcal)\n \"\"\"\n if self._calibration_scope is None:\n self.ensure_in_global_scope(ast.Identifier(\"init cal scope\"))\n self._calibration_scope = CalScopedSymbolTable(\n \"cal_scope\", enclosing_scope=self.current_scope, init_cal=True\n )\n return self._calibration_scope\n\n @property\n def scope_context(self) -> ScopeContext:\n \"\"\"Getter for the 'scope_context' property of a SemanticAnalyser instance\"\"\"\n return self._scope_context\n\n @scope_context.setter\n def scope_context(self, value: ScopeContext):\n LOGGER.debug(\"SET SCOPE CONTEXT: %s\", value)\n self._scope_context = value\n\n # pylint: disable=C0103\n # disable snake_case naming style\n # these functions are of the form \"visit_{QASMNode class name}\"\n def visit_Program(self, node: ast.Program) -> None:\n \"\"\"\n Program node visitor,\n creates and enters a global symbol table (global scope),\n visits all other statements in the openQASM program.\n\n Args:\n node (ast.Program):\n openQASM program ast node to visit\n \"\"\"\n global_scope = ScopedSymbolTable(\n scope_name=\"global\",\n enclosing_scope=self.current_scope,\n )\n with self.scope_context_manager(global_scope, ScopeContext.GLOBAL):\n for statement in node.statements:\n self.visit(statement)\n\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None:\n \"\"\"\n ExternDeclaration node visitor,\n inserts a symbol representing the external function declaration\n into current_scope (symbol table)\n\n Args:\n node (ast.ExternDeclaration):\n openQASM external function declaration ast node to visit\n \"\"\"\n extern_name = node.name.name\n params = [\n ClassicalSymbol(\n name=f\"{extern_name}_arg_{i}\", kind=self.visit(argument.type)\n )\n for i, argument in enumerate(node.arguments)\n ]\n return_type = self.visit(node.return_type) if node.return_type else None\n extern_symbol = ExternSymbol(\n name=extern_name, params=params, return_type=return_type\n )\n self.declare_symbol(extern_symbol)\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition) -> None:\n \"\"\"\n SubroutineDefinition node visitor, subroutines may only be defined in global\n scope.\n inserts a symbol representing the subroutine definition into current_scope,\n creates and enters a symbol table (local scope) to encapsulate\n the subroutie,\n inserts all the parameters of the subroutine function signature into the\n new symbol table,\n visits all statements within the subroutine.\n\n Args:\n node (ast.SubroutineDefinition):\n openQASM subroutine definition ast node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n return_type = 
self.visit(node.return_type) if node.return_type else None\n subroutine_symbol = SubroutineSymbol(\n name=node.name.name, return_type=return_type\n )\n\n self.declare_symbol(subroutine_symbol)\n\n subroutine_scope = ScopedSymbolTable(\n scope_name=node.name.name,\n enclosing_scope=self.current_scope,\n )\n\n with self.scope_context_manager(subroutine_scope, ScopeContext.SUBROUTINE):\n for argument in node.arguments:\n arg_symbol = self.visit(argument)\n subroutine_symbol.params.append(arg_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_QuantumGateDefinition(self, node: ast.QuantumGateDefinition) -> None:\n \"\"\"\n QuantumGateDefinition node visitor, quantum gates may only be defined in global\n scope.\n inserts a symbol representing the gate definition into current_scope,\n creates and enters a symbol table (local scope) to encapsulate\n the gate,\n inserts all the parameters and qubits of the gate function signature\n into the new symbol table,\n visits all statements within the gate definition.\n\n Args:\n node (ast.QuantumGateDefinition):\n openQASM quantum gate definition ast node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n gate_symbol = GateSymbol(name=node.name.name)\n\n self.declare_symbol(gate_symbol)\n\n gate_scope = ScopedSymbolTable(\n scope_name=gate_symbol.name,\n enclosing_scope=self.current_scope,\n )\n\n with self.scope_context_manager(gate_scope, ScopeContext.SUBROUTINE):\n for argument in node.arguments:\n arg_symbol = Symbol(name=argument.name)\n self.declare_symbol(arg_symbol)\n gate_symbol.params.append(arg_symbol)\n\n for qubit in node.qubits:\n qubit_symbol = QuantumSymbol(name=qubit.name, kind=\"QUBIT\")\n self.declare_symbol(qubit_symbol)\n gate_symbol.qubits.append(qubit_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"\n ClassicalDeclaration node visitor\n inserts a symbol representing the classical variable into current_scope\n\n Note:\n Arrays cannot be declared inside the body of a function or gate.\n All arrays must be declared within the global scope of the program.\n https://openqasm.com/language/types.html#arrays\n\n Args:\n node (ast.ClassicalDeclaration):\n openQASM classical declaration ast node to visit\n \"\"\"\n if isinstance(node.type, ast.ArrayType):\n self.ensure_in_global_scope(node.identifier)\n type_symbol = self.visit(node.type)\n LOGGER.debug(\n \"Classical Declaration: name: %s, kind: %s\",\n node.identifier.name,\n type_symbol,\n )\n decl_symbol = ClassicalSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration) -> None:\n \"\"\"\n ConstantDeclaration node visitor\n inserts a symbol representing the constant into current_scope\n\n Args:\n node (ast.ConstantDeclaration):\n openQASM constant declaration ast node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n decl_symbol = ConstantSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration) -> None:\n \"\"\"\n QubitDeclaration node visitor\n inserts a symbol representing the qubit into current_scope\n\n Note:\n All qubits are global variables.\n Qubits cannot be declared within gates or subroutines.\n https://openqasm.com/language/types.html#quantum-types\n\n Args:\n node (ast.QubitDeclaration):\n openQASM qubit declaration ast node to visit\n 
\"\"\"\n # qubits can only be declared in global scope\n self.ensure_in_global_scope(node.qubit)\n decl_symbol = QuantumSymbol(name=node.qubit.name, kind=\"QUBIT\")\n self.declare_symbol(decl_symbol)\n\n def visit_IODeclaration(self, node: ast.IODeclaration) -> None:\n \"\"\"\n ToDo: may require more / different handling when we start using this\n\n IODeclaration node visitor\n inserts a symbol representing the io into current_scope\n\n input/output modifiers can be used to indicate that variables will be\n supplied to / generated by an openQASM program at runtime\n\n https://openqasm.com/language/directives.html#input-output\n\n Args:\n node (ast.IODeclaration):\n openQASM io declaration ast node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n decl_symbol = IOSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_Identifier(self, node: ast.Identifier):\n \"\"\"\n Identifier node visitor:\n Looks up the name of the identifer within current and enclosing scope,\n raises an ID_NOT_FOUND error if the identifier hasn't been declared\n\n Args:\n node (ast.Identifier):\n openQASM identifier node to visit\n\n Raises:\n SemanticError with ErrorCode.ID_NOT_FOUND\n \"\"\"\n node_symbol = self.current_scope.lookup(node.name)\n if node.name[0] == \"$\":\n pass\n elif node_symbol is None:\n raise self.error(ErrorCode.ID_NOT_FOUND, node.name)\n\n def visit_AliasStatement(self, node: ast.AliasStatement) -> None:\n \"\"\"\n AliastStatement node visitor:\n Creates and declares a symbol for an Alias.\n Then visits the value the alias is assigned\n\n Args:\n node (ast.AliasStatement):\n openQASM alias statment to visit\n \"\"\"\n alias_symbol = AliasSymbol(name=node.target.name)\n self.declare_symbol(alias_symbol)\n self.visit(node.value)\n\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None:\n \"\"\"\n CalibrationStatement node visitor, (cal {} statements):\n Enters calibration scope and visits all statements in the body of the\n calibration statement.\n\n Args:\n node (ast.CalibrationStatement):\n openQASM calibration statement node to visit\n \"\"\"\n self.ensure_in_global_scope(ast.Identifier(\"Calibration Statement\"))\n with self.scope_context_manager(self.calibration_scope, ScopeContext.DEFCAL):\n for statement in node.body:\n self.visit(statement)\n\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None:\n \"\"\"\n CalibrationDefinition node visitor, (defcal {} statements):\n Gets a mangles name for the calibration definition and uses it\n to create a symbol representing the defcal statement.\n Inserts a symbol representing the defcal statement into calibration scope.\n Creates a new CalScopedSymbolTable and enters it.\n Inserts symbols for all parameters and qubits into the new scope.\n Visits all statements withing the body of the defcal statement\n\n Args:\n node (ast.CalibrationDefinition):\n openQASM calibration definition node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n defcal_name = Mangler(node).signature().mangle()\n return_type = self.visit(node.return_type) if node.return_type else None\n defcal_symbol = DefcalSymbol(name=defcal_name, return_type=return_type)\n with self.scope_context_manager(\n self.calibration_scope, context=ScopeContext.DEFCAL\n ):\n self.declare_symbol(defcal_symbol)\n\n defcal_scope = CalScopedSymbolTable(\n scope_name=defcal_symbol.name,\n enclosing_scope=self.calibration_scope,\n )\n\n with self.scope_context_manager(defcal_scope, 
ScopeContext.DEFCAL):\n for argument in node.arguments:\n arg_symbol = self.visit(argument)\n defcal_symbol.params.append(arg_symbol)\n\n for qubit in node.qubits:\n qubit_symbol = QuantumSymbol(\n name=qubit.name, kind=self.current_scope.lookup(\"QUBIT\").name\n )\n self.declare_symbol(qubit_symbol)\n defcal_symbol.qubits.append(qubit_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_QuantumGate(self, node: ast.QuantumGate) -> None:\n \"\"\"\n QuantumGate node visitor, (gate call):\n Gets the mangled name best matching the gate call.\n Looks up the mangled name of the gate within the calibration scope.\n Raises an ID_NOT_FOUND error if the gate hasn't been declared.\n\n Args:\n node (ast.QuantumGate):\n openQASM qauntum gate node to visit\n\n Raises:\n SemanticError with ErrorCode.ID_NOT_FOUND\n \"\"\"\n f_signature = Mangler(node).signature()\n symbols = f_signature.match(self.current_scope.keys())\n if not symbols:\n symbols = f_signature.match(self.calibration_scope.keys())\n if symbols:\n # per https://github.com/openqasm/openqasm/issues/245\n return symbols[-1]\n raise self.error(ErrorCode.ID_NOT_FOUND, node.name)\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument) -> ClassicalSymbol:\n \"\"\"\n ClassicalArgument node visitor:\n Creates and inserts a ClassicalSymbol for function arguments (def, defcal)\n into current scope\n\n Args:\n node (ast.ClassicalArgument):\n openQASM classical argument node to visit\n\n Returns:\n ClassicalSymbol: the symbol inserted in to current scope\n \"\"\"\n arg_symbol = ClassicalSymbol(name=node.name.name, kind=self.visit(node.type))\n self.declare_symbol(arg_symbol)\n return arg_symbol\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument) -> QuantumSymbol:\n \"\"\"\n QuantumArgument node visitor:\n Creates and inserts a QuantumSymbol for function arguments (def, defcal)\n into current scope\n\n Args:\n node (ast.QuantumArgument):\n openQASM quantum argument node to visit\n\n Returns:\n QuantumSymbol: the symbol inserted in to current scope\n \"\"\"\n arg_symbol = QuantumSymbol(name=node.name.name, kind=\"QUBIT\")\n self.declare_symbol(arg_symbol)\n return arg_symbol\n\n def visit_ForInLoop(self, node: ast.ForInLoop) -> None:\n \"\"\"\n ForInLoop node visitor:\n Visits the set declaration (what will be looped over)\n Enters a new scope.\n Inserts a symbol representing the loop variable into the new scope\n Visits every statement in the block of the ForInLoop\n\n Args:\n node (ast.ForInLoop):\n openQASM for in loop node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n loop_symbol = ClassicalSymbol(name=node.identifier.name, kind=type_symbol)\n self.visit(node.set_declaration)\n with self.local_context_manager(\"for_loop_scope\", node.block):\n self.current_scope.insert(loop_symbol)\n\n def visit_BranchingStatement(self, node: ast.BranchingStatement) -> None:\n \"\"\"\n BranchingStatement node visitor (if/else):\n visits the condition node of the if/else statement\n Enters a new scope for the if block and visits every statment within it.\n Leaves the if block scope\n Enters a new scope for the else block and visits every statment within it.\n\n Args:\n node (ast.BranchingStatement):\n openQASM branching (if/else) node to visit\n \"\"\"\n self.visit(node.condition)\n with self.local_context_manager(\"if_scope\", node.if_block):\n pass\n with self.local_context_manager(\"else_scope\", node.else_block):\n pass\n\n def visit_WhileLoop(self, node: ast.WhileLoop) -> None:\n \"\"\"\n WhileLoop node 
visitor:\n visits the condition node of the while statement\n Enters a new scope for the while block and visits every statment within it.\n\n Args:\n node (ast.WhileLoop):\n openQASM while node to visit\n \"\"\"\n self.visit(node.while_condition)\n with self.local_context_manager(\"while_scope\", node.block):\n pass\n\n def visit_Box(self, node: ast.Box) -> None:\n \"\"\"\n Box node visitor:\n visits the duration node of the Box statement\n Enters a new scope for the Box block and visits every statment within it.\n\n Args:\n node (ast.Box):\n openQASM Box node to visit\n \"\"\"\n if node.duration:\n self.visit(node.duration)\n with self.local_context_manager(\"box_scope\", node.body):\n pass\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression):\n \"\"\"\n UnaryExpression node visitor:\n validates the operator of the unary expression node\n visits the expression of the unary expression node\n\n Args:\n node (ast.UnaryExpression):\n openQASM unary expression node to visit\n \"\"\"\n # todo check if unary op is allowed for expression\n assert isinstance(node.op, type(ast.UnaryOperator[\"!\"]))\n self.visit(node.expression)\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression):\n \"\"\"\n BinaryExpression node visitor:\n validates the operator of the binary expression node\n visits each side of the binary expression\n\n Args:\n node (ast.BinaryExpression):\n openQASM binary expression node to visit\n \"\"\"\n # todo check if binary op is allowed between lhs and rhs\n assert isinstance(node.op, type(ast.BinaryOperator[\"+\"]))\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_FunctionCall(self, node: ast.FunctionCall):\n \"\"\"\n FunctionCall node visitor:\n visits the name (Identifier) node of the function call\n visits all the argument nodes of the function call\n\n Args:\n node (ast.FunctionCall):\n openQASM function call node to visit\n \"\"\"\n self.visit(node.name)\n for argument in node.arguments:\n self.visit(argument)\n\n def visit_Cast(self, node: ast.Cast):\n \"\"\"\n Cast node visitor:\n validates that the type being cast to is a classical type\n # todo should be more narrow, e.g. 
durration can't be cast to\n visits the argument node of the cast node\n\n Args:\n node (ast.Cast):\n openQASM cast node to visit\n \"\"\"\n assert isinstance(node.type, ast.ClassicalType)\n self.visit(node.argument)\n\n def visit_IndexExpression(self, node: ast.IndexExpression):\n \"\"\"\n IndexExpression node visitor:\n visits collection node of an index expression node\n visits index node of an index expression node\n\n Args:\n node (ast.IndexExpression):\n openQASM index expression node to visit\n \"\"\"\n self.visit(node.collection)\n if isinstance(node.index, list):\n for i_node in node.index:\n self.visit(i_node)\n else:\n self.visit(node.index)\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet):\n \"\"\"\n DiscreteSet node visitor:\n visits each node of a DiscreteSet\n\n Args:\n node (ast.DiscreteSet):\n openQASM discreate set node to visit\n \"\"\"\n for expression in node.values:\n self.visit(expression)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition):\n \"\"\"\n RangeDefinition node visitor:\n visits start, end and step nodes of a RangeDefinition\n\n Args:\n node (ast.RangeDefinition):\n openQASM range definition node to visit\n \"\"\"\n if node.start:\n self.visit(node.start)\n if node.end:\n self.visit(node.end)\n if node.step:\n self.visit(node.step)\n\n def visit_Concatenation(self, node: ast.Concatenation):\n \"\"\"\n Concatenation node visitor:\n visits each side of the concatenation expression\n\n Args:\n node (ast.Concatenation):\n openQASM concatenation node to visit\n \"\"\"\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral) -> LiteralSymbol:\n \"\"\"\n BitstringLiteral node visitor:\n\n Args:\n node (ast.BitstringLiteral):\n openQASM bitstring literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_BitstringLiteral(node)\n return LiteralSymbol(name=value, kind=\"BITSTRING\")\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> LiteralSymbol:\n \"\"\"\n IntegerLiteral node visitor:\n\n Args:\n node (ast.IntegerLiteral):\n openQASM integer literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_IntegerLiteral(node)\n return LiteralSymbol(name=value, kind=\"INT\")\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral) -> LiteralSymbol:\n \"\"\"\n FloatLiteral node visitor:\n\n Args:\n node (ast.FloatLiteral):\n openQASM float literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_FloatLiteral(node)\n return LiteralSymbol(name=value, kind=\"FLOAT\")\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> LiteralSymbol:\n \"\"\"\n ImaginaryLiteral node visitor:\n\n Args:\n node (ast.ImaginaryLiteral):\n openQASM imaginary literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_ImaginaryLiteral(node)\n return LiteralSymbol(name=value, kind=\"IMAGINARY\")\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> LiteralSymbol:\n \"\"\"\n BooleanLiteral node visitor:\n\n Args:\n node (ast.BooleanLiteral):\n openQASM boolean literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_BooleanLiteral(node)\n return LiteralSymbol(name=value, kind=\"BOOL\")\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> 
LiteralSymbol:\n \"\"\"\n DurationLiteral node visitor:\n\n Args:\n node (ast.DurationLiteral):\n openQASM duration literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_DurationLiteral(node)\n return LiteralSymbol(name=value, kind=\"DURATION\")\n\n # pylint: disable=C0103\n # (snake_case naming style)\n\n def _visit_type_node(self, node: ast.ClassicalType) -> str:\n \"\"\"\n type node visitor:\n Returns the name of a Type node\n Example:\n node:ast.FloatType -> 'FLOAT'\n\n Args:\n node (ast.ClassicalType): node that is a subclass of ClassicalType\n\n Returns:\n str: name of the node type\n \"\"\"\n name = super()._visit_type_node(node)\n name_in_table = self.current_scope.lookup(name).name\n return name_in_table\n\n def error(self, error_code: ErrorCode, name: str) -> SemanticError:\n \"\"\"\n Method for standardizing error handling of the SemanticAnalyser class.\n Logs current scope and returns a SemanticError object that should be raised\n immediately after this method retuns\n\n Usage:\n raise self.error(...)\n\n Args:\n error_code (ErrorCode):\n Code to identify what issue caused an error to be raised\n name (str):\n An identifer string to identify what caused the error\n\n Returns:\n SemanticError: should be raised immediatly on method return\n \"\"\"\n LOGGER.debug(\"CURRENT SCOPE: %s\", self.current_scope)\n LOGGER.debug(\"CALIBRATION SCOPE: %s\", self._calibration_scope)\n return SemanticError(error_code, message=f\"{error_code.value} -> {name}\")\n\n def declare_symbol(self, symbol: Symbol):\n \"\"\"Method for standardizing symbol declaration.\n Symbols are first looked up (in current scope only)\n before being inserted into current scope (if not already in scope)\n\n Args:\n symbol (Symbol): to insert into current scope\n\n Raises:\n SemanticError: ErrorCode.DUBLICATE_ID\n \"\"\"\n if self.current_scope.lookup(symbol.name, current_scope_only=True):\n raise self.error(ErrorCode.DUPLICATE_ID, symbol.name)\n self.current_scope.insert(symbol)\n\n def ensure_in_global_scope(self, node: ast.Identifier):\n \"\"\"\n Ensures that the current scope_context is global scope\n Used to make sure that declarations such as Subroutines and defcals\n Are only used in the allowed scope (GLOBAL)\n\n Args:\n node (ast.Identifier): Node that is currently being visited\n\n Raises:\n SemanticError: ErrorCode.NOT_IN_GLOBAL_SCOPE\n \"\"\"\n if not self.scope_context == ScopeContext.GLOBAL:\n raise self.error(ErrorCode.NOT_IN_GLOBAL_SCOPE, node.name)\n\n @contextmanager\n def scope_context_manager(\n self,\n symbol_table: ScopedSymbolTable,\n context: ScopeContext,\n ):\n \"\"\"\n Context manager for entering/leaving scopes in specific ScopeContext\n\n Args:\n symbol_table (ScopedSymbolTable): Symbol Table / Scope to enter\n context (ScopeContext): what context the scope is entered in\n \"\"\"\n enclosing_scope = self.current_scope\n enclosing_context = self.scope_context\n self.current_scope = symbol_table\n self.scope_context = context\n try:\n yield\n finally:\n if enclosing_context:\n self.scope_context = enclosing_context\n if enclosing_scope:\n self.current_scope = enclosing_scope\n LOGGER.debug(symbol_table)\n LOGGER.debug(\"LEAVE scope: %s\", symbol_table.scope_name)\n\n @contextmanager\n def local_context_manager(self, name: str, block: list[ast.Statement]):\n \"\"\"\n Context manager for entering/leaving local scopes (if/else, for, while, box)\n What ScopeContext is entered depends on the current ScopeContext.\n If in GLOBAL 
then enter LOCAL\n Else (LOCAL, SUBROUTINE, DEFCAL) then keep context unchanged.\n Once in the new scope nodes in the block of the scope will be visited in order\n\n Args:\n name (str):\n Name of the ScopedSymbolTable to enter\n block (list[ast.Statement]):\n list of openQASM statments nodes, visited in order\n \"\"\"\n scope = ScopedSymbolTable(name, enclosing_scope=self.current_scope)\n context = (\n ScopeContext.LOCAL\n if self.scope_context == ScopeContext.GLOBAL\n else self.scope_context\n )\n\n with self.scope_context_manager(scope, context):\n yield\n for statement in block:\n self.visit(statement)" }, { "identifier": "PulseVisualizer", "path": "shipyard/printers/visualizer/visualize_pulse_sequence.py", "snippet": "class PulseVisualizer(Interpreter):\n def __init__(\n self,\n setup: SetupInternal = None,\n external_functions: dict = None,\n ):\n super().__init__(setup, external_functions)\n self.pulses = {} # dict of pulses for each frame/ port\n self.phases = {} # dict of phases for each frame/ port\n self.frequencies = {} # dict of frequencies for each frame/ port\n self.plot_flag: bool = False\n\n def visit_Program(self, node: ast.Program) -> None:\n activation_record = ActivationRecord(\n name=\"main\", ar_type=ARType.PROGRAM, nesting_level=1\n )\n with self.ar_context_manager(activation_record):\n for statement in node.statements:\n self.visit(statement)\n for frame in self.pulses.keys():\n self.plotter(\n np.concatenate(self.pulses[frame]),\n np.concatenate(self.phases[frame]),\n np.concatenate(self.frequencies[frame]),\n frame,\n )\n\n def plotter(self, wfm_array, phase_array, frequency_array, frame_name):\n fig, axs = plt.subplots(3)\n if all(isinstance(i, complex) for i in wfm_array):\n axs[0].plot([value.real for value in wfm_array], label=\"real\")\n axs[0].plot([value.imag for value in wfm_array], label=\"imag\")\n axs[0].legend()\n else:\n axs[0].plot(wfm_array)\n axs[0].set(ylabel=f\"{frame_name} amplitude\")\n axs[1].plot(phase_array)\n axs[1].set(ylabel=f\"{frame_name} phase\")\n axs[2].plot(frequency_array)\n axs[2].set(ylabel=f\"{frame_name} frequency\")\n if self.plot_flag: # pragma: no cover\n plt.show()\n\n @_maybe_annotated\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"\n ClassicalDeclaration node visitor:\n Visits and stores classical declarations of variables. 
If the variable\n declared is a frame, the frame is added to the current activation record,\n as well as the Interpreter's pulse, phase, and frequency dictionaries.\n\n Args:\n node (ast.ClassicalDeclaration): openQASM ClassicalDeclaration AST node\n\n \"\"\"\n activation_record = self.call_stack.peek()\n match node:\n case ast.ClassicalDeclaration(type=ast.PortType()):\n name = node.identifier.name\n activation_record[name] = self.setup.ports[name]\n case ast.ClassicalDeclaration(\n type=ast.FrameType(),\n init_expression=ast.FunctionCall(name=ast.Identifier(\"newframe\")),\n ):\n call = node.init_expression\n assert isinstance(call, ast.FunctionCall)\n assert len(call.arguments) == 3\n port = call.arguments[0].name\n frequency = self.visit(call.arguments[1])\n phase = self.visit(call.arguments[2])\n frame = Frame(\n name=node.identifier.name,\n port=activation_record[port],\n frequency=frequency,\n phase=phase,\n )\n self.pulses[frame.name] = []\n self.phases[frame.name] = []\n self.frequencies[frame.name] = []\n activation_record[frame.name] = frame\n case ast.ClassicalDeclaration(type=ast.ArrayType()):\n if node.init_expression is None:\n shapes = [dim.value for dim in node.type.dimensions]\n activation_record[node.identifier.name] = np.zeros(shape=shapes)\n else:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n case _:\n if node.init_expression is not None:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n else:\n activation_record[node.identifier.name] = None\n\n @_maybe_annotated\n def visit_DelayInstruction(self, node: ast.DelayInstruction) -> None:\n \"\"\"\n DelayInstruction node visitor:\n Appends delay of 0s to relevant frame\n\n Args:\n node (ast.DelayInstruction): openQASM DelayInstruction AST node\n \"\"\"\n for q in node.qubits:\n if q.name in self.pulses.keys():\n self.pulses[q.name].append(np.zeros(int(self.visit(node.duration))))\n self.phases[q.name].append(\n np.full(\n int(self.visit(node.duration)),\n self.call_stack.down_stack(q.name)[q.name].phase,\n )\n )\n self.frequencies[q.name].append(\n np.full(\n int(self.visit(node.duration)),\n self.call_stack.down_stack(q.name)[q.name].frequency,\n )\n )\n\n def visit_play(self, node: ast.FunctionCall) -> None:\n \"\"\"\n FunctionCall node visitor. Handles 'play' and 'capture' function calls.\n For 'play', 'capture_v1', and 'capture_v2' function calls, the function\n call is visited and the resulting waveform is appended to the relevant\n frame's pulse, phase, and frequency arrays. 
For 'capture_v3' and\n 'capture_v1' function calls, the function call is visited and the resulting\n time value is returned and turned into an array of 1s of that length, and\n appeneded to the relevant frame's pulse, phase, and frequency arrays.\n\n Args:\n node (ast.FunctionCall): 'play' FunctionCall node to visit\n\n Raises:\n Error:\n ErrorCode.UNHANDLED\n If the node does not match the expected format/structure\n \"\"\"\n match node:\n case ast.FunctionCall(\n name=ast.Identifier(\"play\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v2\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ):\n wfm_array = self.visit(wfm_node)\n self.phases[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].phase,\n )\n )\n self.pulses[frame_name].append(wfm_array)\n self.frequencies[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].frequency,\n )\n )\n case ast.FunctionCall(\n name=ast.Identifier(\"capture_v3\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1_spectrum\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ):\n val = self.visit(wfm_node)\n self.phases[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].phase,\n )\n )\n self.pulses[frame_name].append(np.ones(int(val)))\n self.frequencies[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].frequency,\n )\n )\n\n case _:\n raise Error(\n ErrorCode.UNHANDLED,\n f\"Unhandled waveform generation: {node}\",\n )" }, { "identifier": "waveform_functions", "path": "shipyard/printers/zi/waveform_functions.py", "snippet": "def zeros(samples: int) -> np.ndarray:\ndef placeholder(samples: int) -> np.ndarray:\ndef ones(samples: int) -> np.ndarray:\ndef sine(\n samples: int,\n amplitue: float,\n phase_offset: float,\n n_periods: int,\n) -> np.ndarray:\ndef cosine(\n samples: int,\n amplitue: float,\n phase_offset: float,\n n_periods: int,\n) -> np.ndarray:\ndef sinc(samples: int, amplitude: float, position: int, beta: float) -> np.ndarray:\ndef ramp(samples: int, start_level: float, end_level: float) -> np.ndarray:\ndef sawtooth(\n samples: int, amplitude: float, phase_offset: float, n_periods: int\n) -> np.ndarray:\ndef triangle(\n samples: int, amplitude: float, phase_offset: float, n_periods: int\n) -> np.ndarray:\ndef gauss(samples: int, amplitude: float, position: int, width: float) -> np.ndarray:\ndef drag(samples: int, amplitude: float, position: int, width: float) -> np.ndarray:\ndef blackman(samples: int, amplitude: float, alpha: float) -> np.ndarray:\ndef hamming(samples: int, amplitude: float) -> np.ndarray:\ndef hann(samples: int, amplitude: float) -> np.ndarray:\ndef rect(samples: int, amplitude: float) -> np.ndarray:\ndef chirp(\n samples: int,\n amplitude: float,\n start_freq: float,\n stop_freq: float,\n phase: float = 0.0,\n) -> np.ndarray:\ndef rrc(\n samples: int, amplitude: float, position: int, beta: float, width: float\n) -> np.ndarray:\n def _special_value():" }, { "identifier": "Frame", "path": "shipyard/setup/internal.py", "snippet": "class Frame(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse frame concept as a pydantic model.\n 
https://openqasm.com/language/openpulse.html#frames\n\n Args:\n name (str):\n name of the frame.\n port (Port):\n the Port object the frame is associated with.\n frequency (float):\n the frequency the frame evolves at. Defaults to 0.\n phase (float):\n the phase of the frame.\n time (Duration):\n the time of the frame.\n \"\"\"\n\n name: str\n port: Port\n frequency: float = 0.0\n phase: float = 0.0\n time: Duration = Duration(time=0)\n\n def set_phase(self, phase: float):\n \"\"\"Sets the phase of the frame\n\n Args:\n phase (float): the value the phase will be set to\n \"\"\"\n self.phase = phase\n\n def shift_phase(self, phase: float):\n \"\"\"Shifts the phase of the frame\n\n Args:\n phase (float): the value the phase will be shifted by.\n \"\"\"\n self.phase += phase\n\n def get_phase(self) -> float:\n \"\"\"Gets the phase of the frame\n\n Returns:\n float: current value of the phase of the frame.\n \"\"\"\n return self.phase\n\n def set_frequency(self, frequency: float):\n \"\"\"Sets the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be set to.\n \"\"\"\n self.frequency = frequency\n\n def shift_frequency(self, frequency: float):\n \"\"\"Shifts the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be shifted by.\n \"\"\"\n self.frequency += frequency\n\n def get_frequency(self) -> float:\n \"\"\"Gets the frequency of the frame\n\n Returns:\n float: current value of the frequency of the frame.\n \"\"\"\n return self.frequency\n\n def advance(self, duration: Duration):\n \"\"\"Advances the time of the frame by some duration\n\n Args:\n duration (Duration): the duration to advance the time of the frame by.\n \"\"\"\n self.time += duration\n\n def advance_to(self, duration: Duration):\n \"\"\"Advances the time of the frame to some other time\n\n Args:\n duration (Duration): the duratioin to advance the time fo the frame to.\n\n Raises:\n ValueError:\n If the time the frame should be advanced to is less than the\n current time of the frame.\n \"\"\"\n duration.set_unit(self.time.unit)\n if self.time > duration:\n raise ValueError(f\"Cant advance current time {self.time} to {duration}\")\n self.time.time = int(duration.time * duration.unit.value / self.time.unit.value)" }, { "identifier": "Instrument", "path": "shipyard/setup/internal.py", "snippet": "class Instrument(BaseModel):\n \"\"\"\n Minimal information required to identify an Instrument\n\n Args:\n name (str):\n name of instrument instance, used to easily identify one intrument from\n another.\n type (InstrumentType):\n Literal representing the type/model of the instrument.\n serial (str):\n Serial number of the instrument in string format.\n \"\"\"\n\n name: str\n type: InstrumentType\n serial: str" }, { "identifier": "Port", "path": "shipyard/setup/internal.py", "snippet": "class Port(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse port concept as a pydantic model.\n https://openqasm.com/language/openpulse.html#ports\n\n Args:\n name (str):\n name of the port.\n instrument (Instrument):\n What instrument the port is associated with.\n core (Core):\n Settings for the AWG Core the port is associated with.\n \"\"\"\n\n class Core(BaseModel):\n \"\"\"\n Settings for a AWG core\n\n Args:\n type (CoreType):\n the Type of AWG Core this 'Core' object is\n index (int):\n the index of the AWG Core on the Instrument this 'Core' object belongs.\n channels (list[int]):\n the channels of the AWG Core this 'Core' object belongs\n \"\"\"\n\n type: CoreType\n 
index: int\n channels: list[int]\n\n # pylint: disable=R0903\n # too-few-public-methods\n class Config:\n \"\"\"Pydantic model config for Core\"\"\"\n\n frozen = True\n\n # pylint: enable=R0903\n\n def obj(self) -> AWGCore:\n \"\"\"\n Returns an AWGCore subclass of type matching the type of the pydantic core\n model.\n\n Returns:\n AWGCore: AWGCore subclass of type matching the model instance.\n \"\"\"\n return CORE_TYPE_TO_CLASS[self.type]\n\n @validator(\"channels\")\n def not_more_channels_than_core_type_allows(cls, channels: list[int], values):\n \"\"\"\n Validates that the number of channels for the Core object does\n not exceed the number of channels allowed by the CoreType\n \"\"\"\n assert channels\n assert \"type\" in values\n assert len(channels) <= CORE_TYPE_TO_CLASS[values[\"type\"]].n_channels\n return channels\n\n name: str\n instrument: Instrument\n core: Core\n\n # pylint: disable=R0903\n # too-few-public-methods\n class Config:\n \"\"\"Pydantic model config for Port\"\"\"\n\n frozen = True\n\n # pylint: enable=R0903" }, { "identifier": "SetupInternal", "path": "shipyard/setup/internal.py", "snippet": "class SetupInternal(BaseModel):\n\n \"\"\"\n A Pydantic model containing the information required to compile an openQASM program\n to instrument level instructions.\n\n It is recommended to instanciate this object from a configuration file\n (json (future yml?))\n \"\"\"\n\n # todo validation\n\n # todo move to own module\n instruments: dict[str, Instrument]\n ports: dict[str, Port]\n frames: dict[str, Frame]\n\n @classmethod\n def from_dict(cls, setup: dict[str, dict[str, dict]]) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a dictionary\n\n Args:\n setup (dict[str, dict[str, dict]]): dictionary to create a Setup object from\n\n Returns:\n Setup: created from dictionary\n \"\"\"\n instruments = {\n k: Instrument(name=k, **v) for k, v in setup[\"Instruments\"].items()\n }\n ports = {}\n for k, val in setup[\"Ports\"].items():\n val[\"instrument\"] = instruments[val[\"instrument\"]]\n val[\"core\"] = Port.Core(**val[\"core\"])\n ports[k] = Port(name=k, **val)\n frames = {}\n for k, val in setup[\"Frames\"].items():\n val[\"port\"] = ports[val[\"port\"]]\n frames[k] = Frame(name=k, **val)\n return cls(instruments=instruments, ports=ports, frames=frames)\n\n def to_dict(self) -> dict[str, dict[str, dict]]:\n \"\"\"Creates a dictionary from a Setup object\n\n Args:\n filename (Path | str, optional):\n path to save dictionary to. 
Defaults to None.\n\n Returns:\n dict[str, dict[str, dict]]: dictionary created from Setup object\n \"\"\"\n setup = {\n \"Instruments\": {\n k: {\n \"type\": v.type,\n \"serial\": v.serial,\n }\n for k, v in self.instruments.items()\n },\n \"Ports\": {\n k: {\n \"instrument\": v.instrument.name,\n \"core\": {\n \"type\": v.core.type.value,\n \"index\": v.core.index,\n \"channels\": v.core.channels,\n },\n }\n for k, v in self.ports.items()\n },\n \"Frames\": {\n k: {\n \"port\": v.port.name,\n \"frequency\": v.frequency,\n \"phase\": v.phase,\n }\n for k, v in self.frames.items()\n },\n }\n return setup\n\n @classmethod\n def from_json(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a json file\n\n Args:\n filename (str | Path): path to json file\n\n Returns:\n Setup: created from json file\n \"\"\"\n with open(filename, encoding=\"utf-8\") as file:\n data = json.load(file)\n return cls.from_dict(data)\n\n def to_json(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a json file\n\n Args:\n filename (str | Path): path to json file to create\n\n Returns:\n Path: path to json file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n return Path(filename)\n\n @classmethod\n def from_yml(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a yml file\n\n Args:\n filename (str | Path): path to yml file\n\n Returns:\n Setup: created from yml file\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = yaml.safe_load(file)\n return cls.from_dict(data)\n\n def to_yml(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a yml file\n\n Args:\n filename (str | Path): path to yml file to create\n\n Returns:\n Path: path to yml file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(data, file)\n return Path(filename)\n\n def cores(self) -> set[tuple[str, int, str]]:\n \"\"\"Gets all the AWG Cores used in the setup\n\n Returns:\n set[tuple[str, int, str]]:\n a Set of tuples, each tuple has a string representing the instruement\n name, a integer representing the index of the awg core of the\n instrument and a string representing the type of the awg core.\n \"\"\"\n return set(\n (port.instrument.name, port.core.index, port.core.type.value)\n for port in self.ports.values()\n )" } ]
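The `SetupInternal.from_dict` classmethod above documents the expected nesting of a setup dictionary (Instruments -> Ports -> Frames). A minimal sketch of that round trip follows; the instrument and core `type` strings are hypothetical placeholders, since the `InstrumentType` and `CoreType` enums are not part of this excerpt:

from shipyard.setup.internal import SetupInternal

setup_dict = {
    "Instruments": {
        # "type" must be a valid InstrumentType literal; "HD" is a placeholder
        "awg0": {"type": "HD", "serial": "DEV0000"},
    },
    "Ports": {
        "ch0": {
            "instrument": "awg0",
            # the CoreType value and channel indices are likewise assumptions
            "core": {"type": "HD", "index": 0, "channels": [0]},
        },
    },
    "Frames": {
        "tx_frame": {"port": "ch0", "frequency": 3e9, "phase": 0.0},
    },
}

setup = SetupInternal.from_dict(setup_dict)
frame = setup.frames["tx_frame"]
frame.shift_phase(0.25)  # phase: 0.0 -> 0.25, per Frame.shift_phase above
assert setup.to_dict()["Frames"]["tx_frame"]["phase"] == 0.25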
import codecs import json import numpy as np import pytest from pathlib import Path from shipyard.awg_core.awg_core import CoreType from shipyard.call_stack import ActivationRecord, ARType from shipyard.compiler import Compiler from shipyard.duration import Duration, TimeUnits from shipyard.passes.duration_transformer import DurationTransformer from shipyard.passes.resolve_io_declaration import ResolveIODeclaration from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer from shipyard.printers.zi import waveform_functions from shipyard.setup.internal import Frame, Instrument, Port, SetupInternal
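Among the modules imported above, `waveform_functions` provides the envelope helpers whose signatures appear in the context snippet. A quick hedged sketch of calling two of them; that the returned array has exactly `samples` entries is an assumption about the implementation:

import numpy as np
from shipyard.printers.zi import waveform_functions

flat = waveform_functions.ones(16)                # rect-like envelope of ones
env = waveform_functions.gauss(64, 1.0, 32, 8.0)  # gauss(samples, amplitude, position, width)
assert isinstance(env, np.ndarray)
assert len(env) == 64                             # assumed: output length == samples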
17486
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast) pv = PulseVisualizer( SetupInternal.from_json(setup_path), waveform_functions.__dict__ )
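The gold `next_line` for this record (shown below) begins constructing an `ActivationRecord`. Following the pattern of `PulseVisualizer.visit_Program` in the context above, a plausible sketch of how the test body continues; everything past the gold line is an assumption:

from shipyard.call_stack import ActivationRecord, ARType

activation_record = ActivationRecord(
    name="main", ar_type=ARType.PROGRAM, nesting_level=1
)
# Mirror visit_Program: enter the activation record, then visit each statement
with pv.ar_context_manager(activation_record):
    for statement in qasm_ast.statements:
        pv.visit(statement)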
activation_record = ActivationRecord(
1
2023-11-16 17:37:29+00:00
24k
quantuminterface/qiclib
src/qiclib/code/qi_jobs.py
[ { "identifier": "TaskRunner", "path": "src/qiclib/hardware/taskrunner.py", "snippet": "class TaskRunner(PlatformComponent):\n \"\"\"Driver to control the Taskrunner on the Hardware Platform.\"\"\"\n\n def __init__(\n self,\n name: str,\n connection,\n controller,\n qkit_instrument=True,\n ):\n super().__init__(name, connection, controller, qkit_instrument)\n self._stub = grpc_stub.TaskRunnerServiceStub(self._conn.channel)\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not fetch the current firmware hash of the Taskrunner\"\n )\n def firmware_hash(self):\n \"\"\"The hash of the current firmware running on the realtime core.\"\"\"\n return self._stub.GetStatus(proto.Empty()).firmware_hash\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not determine the build date of the Taskrunner firmware\"\n )\n def firmware_build_date(self):\n \"\"\"Returns the build date of the Taskrunner firmware.\"\"\"\n return self._stub.GetStatus(proto.Empty()).build_date\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not determine the build commit of the Taskrunner firmware\"\n )\n def firmware_build_commit(self):\n \"\"\"Returns the build commit hash of the Taskrunner firmware.\"\"\"\n return self._stub.GetStatus(proto.Empty()).build_commit\n\n @property\n @platform_attribute\n @ServiceHubCall(errormsg=\"Could not determine the status of the taskrunner\")\n def loaded_task(self):\n \"\"\"The name of the currently loaded task.\"\"\"\n return self._stub.GetStatus(proto.Empty()).task_name\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine the progress of the task\")\n def task_progress(self):\n \"\"\"Returns the progress of the task\"\"\"\n return self._stub.GetStatus(proto.Empty()).task_progress\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine number of available databoxes\")\n def databoxes_available(self):\n \"\"\"Returns the number of available databoxes.\"\"\"\n return self._stub.GetStatus(proto.Empty()).databoxes_available\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine state of the taskrunner\")\n def busy(self):\n \"\"\"Returns if the taskrunner is currently busy.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).busy\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if task has finished\")\n def task_done(self):\n \"\"\"Returns if the task has finished.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).done\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if task has error messages\")\n def task_errormsg_available(self):\n \"\"\"Returns if task has error messages.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).error_msg_available\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if error message queue is full\")\n def task_errormsg_queue_full(self):\n \"\"\"Returns if if error message queue is full.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).error_msg_queue_full\n\n @ServiceHubCall(errormsg=\"Failed to start task\")\n def start_task(self, loop=False, overwrite=False):\n \"\"\"Starts the execution of a previously loaded task.\n\n :param loop: bool, optional\n if the task should be executed in a loop, by default False\n :param overwrite: bool, optional\n if a current running task should be stopped, by default False\n \"\"\"\n self._stub.StartTask(\n proto.StartTaskRequest(looping=loop, stop_running=overwrite)\n )\n\n @ServiceHubCall(errormsg=\"Failed to stop task\")\n def stop_task(self):\n \"\"\"Stops the execution of 
running task.\"\"\"\n self._stub.StopTask(proto.StopTaskRequest())\n\n @ServiceHubCall(errormsg=\"Failed to reset task\")\n def reset_task(self):\n \"\"\"Resets (unloads) a loaded task.\"\"\"\n self._stub.StopTask(proto.StopTaskRequest(reset=True))\n\n @ServiceHubCall(errormsg=\"Failed to load task binary\")\n def load_task_binary(self, filename, taskname):\n \"\"\"Loads a task binary into the taskrunner.\n The *taskname* needs to match the name of the task to load\n in order to verify that it is indeed the desired task file.\n\n :param filename: str\n name of the file with the task\n :param taskname: str\n name of the task\n\n :raises ValueError:\n if the path of the file is not found\n \"\"\"\n if not os.path.exists(filename):\n raise ValueError(\"File not found!\")\n\n with open(filename, \"rb\") as f:\n binary = f.read()\n self._stub.ProgramTask(proto.ProgramTaskRequest(name=taskname, task=binary))\n\n @ServiceHubCall(errormsg=\"Failed to compile and load task binary\")\n def load_task_source(self, filename, taskname):\n \"\"\"Loads a task source file `filename` into the taskrunner.\n `taskname` can be freely chosen to later identify the task on the platform.\n\n :param filename:\n name of the file with the task\n :param taskname:\n name of the task\n \"\"\"\n if os.path.isfile(filename):\n # File name can be full path to a file\n filepath = filename\n else:\n # or just the file name -> pick from task repository\n filepath = get_task_source(filename)\n\n with open(filepath, \"rb\") as f:\n binary = f.read()\n\n self._stub.CompileTask(proto.ProgramTaskRequest(name=taskname, task=binary))\n\n @ServiceHubCall(errormsg=\"Failed to set parameters\")\n def set_param_list(self, param_list):\n \"\"\"Sets the parameters for the task. param_list has to be an array of 32bit values.\"\"\"\n self._stub.SetParameter(proto.ParameterRequest(parameters=param_list))\n\n class DataMode(Enum):\n INT8 = 1\n UINT8 = 2\n INT16 = 3\n UINT16 = 4\n INT32 = 5\n UINT32 = 6\n INT64 = 7\n UINT64 = 8\n\n @ServiceHubCall(errormsg=\"Failed to fetch databoxes from taskrunner\")\n def get_databoxes_with_mode(\n self, mode=DataMode.INT32, require_done=True\n ) -> List[List[Any]]:\n \"\"\"Retrieves data from a previously started task on the R5.\n Depending on the parameter mode, the data is interpreted differently.\n\n :param mode:\n DataMode of the databoxes, by default DataMode.INT32\n :param require_done:\n if the task has to be finished before fetching data, by default True\n\n :return:\n A list of databoxes, being list of values themselves, either int32 or uint32.\n\n :raises Exception:\n If require_done is True and the Task is not finished\n :raises ValueError:\n If the data mode is not known\n :raises Exception:\n If require_done and not data is available\n \"\"\"\n self.check_task_errors()\n\n if require_done and not self.task_done:\n raise RuntimeError(\"Task should be finished prior to fetching data.\")\n\n method_call = {\n TaskRunner.DataMode.INT8: self._stub.GetDataboxesINT8,\n TaskRunner.DataMode.UINT8: self._stub.GetDataboxesUINT8,\n TaskRunner.DataMode.INT16: self._stub.GetDataboxesINT16,\n TaskRunner.DataMode.UINT16: self._stub.GetDataboxesUINT16,\n TaskRunner.DataMode.INT32: self._stub.GetDataboxesINT32,\n TaskRunner.DataMode.UINT32: self._stub.GetDataboxesUINT32,\n TaskRunner.DataMode.INT64: self._stub.GetDataboxesINT64,\n TaskRunner.DataMode.UINT64: self._stub.GetDataboxesUINT64,\n }.get(mode, None)\n if method_call is None:\n raise ValueError(\"Data mode is unknown! 
Only use DataMode Enum values.\")\n\n databoxes: List[List[Any]] = []\n last_index = -1\n for databox_reply in method_call(proto.Empty()):\n # print databox_reply.index, databox_reply.data[:]\n if last_index != databox_reply.index:\n # Create new (empty) databox in list\n databoxes.append([])\n last_index = databox_reply.index\n # Fill the latest databox with content\n databoxes[-1].extend(databox_reply.data[:])\n\n if require_done and not databoxes:\n raise RuntimeError(\n \"No data available to fetch. Are you sure the task completed successfully?\"\n )\n\n return databoxes\n\n def get_databoxes(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT32, require_done)\n\n def get_databoxes_INT8(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 8bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT8, require_done)\n\n def get_databoxes_UINT8(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 8bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT8, require_done)\n\n def get_databoxes_INT16(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 16bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT16, require_done)\n\n def get_databoxes_UINT16(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 16bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT16, require_done)\n\n def get_databoxes_INT32(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT32, require_done)\n\n def get_databoxes_UINT32(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT32, require_done)\n\n def get_databoxes_INT64(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 64bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT64, require_done)\n\n def get_databoxes_UINT64(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 64bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT64, require_done)\n\n @ServiceHubCall\n def get_error_messages(self):\n \"\"\"Retrieves all error messages from the task\"\"\"\n reply = self._stub.GetTaskErrorMessages(proto.Empty())\n return reply.message[:]\n\n def check_task_errors(self):\n errors = self.get_error_messages()\n if errors:\n raise RuntimeError(\n \"The following error messages were retrieved \"\n 
+ \"from the Taskrunner:\\n{}\".format(\"\\n\".join(errors))\n )\n\n # DEPRECATED STUFF\n @property\n def data_size(self):\n \"\"\"TODO Replace by progress in all experiments.\"\"\"\n raise DeprecationWarning(\n \"data_size is not supported anymore! Use task_progress instead!\"\n )" }, { "identifier": "DataProvider", "path": "src/qiclib/experiment/qicode/data_provider.py", "snippet": "class DataProvider(ABC):\n \"\"\"\n Provides uniform access to experiment result data.\n\n Result data is received either from the taskrunner plugin or the unit cell plugin and comes in different formats.\n This class encapsulates the format differences, to allow for further processing of the data to be handled\n independently.\n \"\"\"\n\n @classmethod\n def create(cls, result, use_taskrunner: bool):\n if use_taskrunner:\n return _TaskrunnerDataProvider(result)\n return _InternalPluginDataProvider(result)\n\n def __init__(self, result):\n self._result = result\n\n @abstractmethod\n def get_raw_i(self, cell_index: int):\n pass\n\n @abstractmethod\n def get_raw_q(self, cell_index: int):\n pass\n\n def get_default_i(self, cell_index: int, index: int):\n return self.get_raw_i(cell_index)[index]\n\n def get_default_q(self, cell_index: int, index: int):\n return self.get_raw_q(cell_index)[index]\n\n def get_amp_pha_i(self, cell_index: int, index: int):\n return self.get_default_i(cell_index, index)\n\n def get_amp_pha_q(self, cell_index: int, index: int):\n return self.get_default_q(cell_index, index)\n\n @abstractmethod\n def get_iq_cloud_i(self, cell_index: int, index: int, recording_count: int):\n pass\n\n @abstractmethod\n def get_iq_cloud_q(self, cell_index: int, index: int, recording_count: int):\n pass\n\n def get_states(self, cell_index: int):\n return self._result[cell_index]\n\n def get_counts(self):\n return self.get_states(0)" }, { "identifier": "DataHandler", "path": "src/qiclib/experiment/qicode/data_handler.py", "snippet": "class DataHandler(ABC):\n \"\"\"\n Each subclass of this one handles a different way to process result data, depending on the type of experiment run.\n This usually includes splitting it up for the different boxes.\n It takes a list of cells and the recording data provider and processes it however it sees fit.\n In order to find out the box in which to store a recording it can access the `_result_recording_order` of a cell\n which provides the correct QiResult for the n-th executed recording.\n For examples, see the subclasses.\n\n :param data_provider: to access the experiments results\n :param cell_list: to store processed results there\n \"\"\"\n\n @staticmethod\n def _data_handler_factories() -> (\n Dict[str, Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]]\n ):\n \"\"\"\n This is a method instead of a static variable, because forward references to the subclasses are not possible in\n static variable assignments.\n \"\"\"\n return {\n \"average\": lambda data_provider, cell_list, averages: _DefaultDataHandler(\n data_provider, cell_list\n ),\n \"amp_pha\": lambda data_provider, cell_list, averages: _AmplitudePhaseDataHandler(\n data_provider, cell_list\n ),\n \"iqcloud\": lambda data_provider, cell_list, averages: _IQCloudDataHandler(\n data_provider, cell_list\n ),\n \"raw\": lambda data_provider, cell_list, averages: _RawDataHandler(\n data_provider, cell_list\n ),\n \"states\": _StateDataHandler,\n \"counts\": lambda data_provider, cell_list, averages: _CountDataHandler(\n data_provider, cell_list\n ),\n \"quantum_jumps\": lambda data_provider, cell_list, 
averages: _QuantumJumpsDataHandler(\n data_provider, cell_list\n ),\n \"custom\": lambda data_provider, cell_list, averages: _NotImplementedDataHandler(\n data_provider, cell_list\n ),\n }\n\n @staticmethod\n def names():\n return DataHandler._data_handler_factories().keys()\n\n @classmethod\n def get_factory_by_name(\n cls, name: str\n ) -> Optional[Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]]:\n factories = DataHandler._data_handler_factories()\n if name not in factories:\n return None\n return factories[name]\n\n @classmethod\n def get_custom_wrapper_factory(\n cls, custom_data_handler: Callable[[List[\"QiCell\"], DataProvider], None]\n ) -> Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]:\n return lambda data_provider, cell_list, averages: _CustomDataHandlerWrapper(\n data_provider, cell_list, custom_data_handler\n )\n\n def __init__(self, data_provider: DataProvider, cell_list: List[\"QiCell\"]):\n self.data_provider = data_provider\n self.cell_list = cell_list\n\n @abstractmethod\n def process_results(self):\n pass" }, { "identifier": "SequencerInstruction", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SequencerInstruction:\n OPCODE_WIDTH = 7\n FUNCT3_WIDTH = 3\n FUNCT7_WIDTH = 7\n REGISTER_WIDTH = 5\n LOWER_IMMEDIATE_WIDTH = 12\n UPPER_IMMEDIATE_WIDTH = 20\n\n LOWER_IMM_MAX = (\n 2 ** (LOWER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Lower immediate 12 Bits - 1Bit Signed\n LOWER_IMM_MIN = -(2 ** (LOWER_IMMEDIATE_WIDTH - 1))\n\n UPPER_IMM_MAX = (\n 2 ** (UPPER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Upper immediate 20 Bits - 1Bit Signed\n UPPER_IMM_MIN = -(2 ** (UPPER_IMMEDIATE_WIDTH - 1))\n UPPER_IMM_MAX_UNSIGNED = 2**UPPER_IMMEDIATE_WIDTH\n\n imm_type = Union[int] # might include float in the future\n\n def __init__(self, OpCode: SeqOpCode) -> None:\n self.op = OpCode\n\n @staticmethod\n def is_value_in_lower_immediate(val: imm_type) -> bool:\n return (\n SequencerInstruction.LOWER_IMM_MIN\n <= val\n <= SequencerInstruction.LOWER_IMM_MAX\n )\n\n @staticmethod\n def is_value_in_unsigned_upper_immediate(val: imm_type) -> bool:\n return SequencerInstruction.UPPER_IMM_MAX_UNSIGNED >= abs(val)\n\n @abstractmethod\n def get_riscv_instruction(self) -> int:\n pass" }, { "identifier": "_QiVariableBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiVariableBase(QiExpression):\n \"\"\"Base class for QiVariables.\n Variables can be relevant to only a subset of QiCells, this subset is saved in _relevant_cells.\n Variables are simple expressions and, therefore, are typed.\n Variables can be compared by self.id.\"\"\"\n\n id_iter = itertools.count()\n str_id_iter = itertools.count()\n\n def __init__(\n self,\n type: QiType,\n value: Optional[Union[int, float]] = None,\n name=None,\n ):\n from .qi_jobs import QiCell\n\n assert isinstance(type, QiType)\n assert value is None or isinstance(value, (int, float))\n\n super().__init__()\n\n if type != QiType.UNKNOWN:\n self._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION)\n\n self.value = value\n\n self._value = value\n self._relevant_cells: Set[QiCell] = set()\n self.id = next(_QiVariableBase.id_iter)\n self.str_id = next(_QiVariableBase.str_id_iter)\n\n self._contained_variables.add(self)\n\n self.name = name\n\n @property\n def contained_variables(self):\n return self._contained_variables\n\n @staticmethod\n def reset_str_id():\n _QiVariableBase.str_id_iter = itertools.count()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_variable(self)\n\n def 
_equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, _QiVariableBase) and self.id == other.id\n\n def __hash__(self) -> int:\n return self.id\n\n def __str__(self) -> str:\n return f\"QiVariable({self.name or ''})\"" }, { "identifier": "_QiCalcBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiCalcBase(QiExpression):\n \"\"\"Represents binary and unary operations.\"\"\"\n\n def __init__(self, val1, op, val2) -> None:\n super().__init__()\n\n self.val1 = val1\n self.op: QiOp = op\n self.val2 = val2\n\n from .qi_types import add_qi_calc_constraints\n\n add_qi_calc_constraints(op, val1, val2, self)\n\n @property\n def contained_variables(self):\n \"\"\"Function traverses the operation tree to determine which QiVariables are used for the calculations.\n Found QiVariables are added to _contained_variables\"\"\"\n if len(self._contained_variables) == 0:\n self._variables_to_container()\n\n return self._contained_variables\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_calc(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return (\n isinstance(other, _QiCalcBase)\n and self.op == other.op\n and self.val1._equal_syntax(other.val1)\n and self.val2._equal_syntax(other.val2)\n )\n\n def __str__(self):\n return (\n \"(\"\n + self.val1.__str__()\n + \" \"\n + self.op.value\n + \" \"\n + self.val2.__str__()\n + \")\"\n )" }, { "identifier": "_QiConstValue", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiConstValue(QiExpression):\n \"\"\"Represents QiExpression which are a constant (compiletime known) values.\n Integers can be used as either NORMAL, TIME or FREQUENCY values. It is up to the type inference to figure it out.\n If the value can be represented as a float value it has an additional attribute float_value which represents the value before\n it has been converted to the integer representation used by the sequencer.\n \"\"\"\n\n def __init__(self, value: Union[int, float]):\n super().__init__()\n\n self._given_value = value # Value given to the constructor. 
Is interpreted differently depending on the type.\n\n # Constant STATE values can only be 0 or 1, therefore we forbid QiType.STATE if we have a different value.\n if isinstance(self._given_value, float) or self._given_value not in [1, 0]:\n self._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.INVALID_STATE_CONSTANT\n )\n\n if isinstance(self._given_value, float):\n self._type_info.add_illegal_type(\n QiType.NORMAL, _IllegalTypeReason.INVALID_NORMAL_CONSTANT\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME or self.type, QiType.FREQUENCY)\n return self._given_value\n\n @property\n def value(self):\n \"\"\"\n Integer representation of the constant value.\n Since the sequencer doesn't have a floating point unit, any calculations has to be using integers.\n In practice, this means we only perform fixpoint arithmetic and need to convert any float like value\n to such an fixpoint value.\n The correct conversion depends on the type.\n \"\"\"\n if self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n return self._given_value\n elif self.type == QiType.TIME:\n return int(util.conv_time_to_cycles(self._given_value, \"ceil\"))\n else:\n assert self.type == QiType.FREQUENCY\n return util.conv_freq_to_nco_phase_inc(self._given_value)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_constant(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n assert QiType.UNKNOWN not in (self.type, other.type)\n return isinstance(other, _QiConstValue) and self.value == other.value\n\n def __str__(self):\n if self.type in (QiType.TIME, QiType.FREQUENCY):\n value = self.float_value\n elif self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n value = self.value\n else:\n raise RuntimeError(\n \"This program point should be unreacheable. Please file a bug report.\"\n )\n return f\"{value:g}\"" }, { "identifier": "QiCellProperty", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. 
Instead a QiCellProperty object will be generated.\n This object can be used as length definition in cQiWait commands and QiPulse\"\"\"\n\n def __init__(self, cell, name):\n super().__init__()\n from .qi_jobs import QiCell\n\n self.name: str = name\n self.cell: QiCell = cell\n self.operations = lambda val: val\n self.opcode = \"x\"\n\n @property\n def opcode_p(self):\n \"\"\"Old opcode in parantheses for building new opcode\"\"\"\n return self.opcode if self.opcode == \"x\" else f\"({self.opcode})\"\n\n def resolve_equal(self, o: object) -> bool:\n if isinstance(o, QiCellProperty):\n return self.name == o.name and self.opcode == o.opcode\n elif o is None:\n return False\n try:\n return o == self()\n except KeyError:\n return False # At time of comparison, unresolved property is not equal to o\n\n def __call__(self):\n value = self.cell._properties.get(self.name)\n\n if isinstance(value, QiCellProperty) or value is None:\n raise KeyError(\"Property could not be resolved\")\n return self.operations(value)\n\n @property\n def value(self):\n if self.type == QiType.TIME:\n return util.conv_time_to_cycles(self())\n elif self.type == QiType.FREQUENCY:\n return util.conv_freq_to_nco_phase_inc(self())\n elif self.type == QiType.NORMAL:\n return self()\n elif self.type == QiType.STATE:\n return self()\n else:\n raise RuntimeError(\n \"Mising type information to resolve value to convert to a machine value.\"\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self()\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_cell_property(self)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, QiCellProperty) and self.resolve_equal(other)\n\n def move_add_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations # Necessary because of recursion otherwise\n self.operations = lambda val: old_op(val) + x.value\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_radd_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value + old_op(val)\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_sub_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) - x.value\n self.opcode = f\"{self.opcode_p} - {x}\"\n return self\n\n def move_rsub_op_to_property(self, x: _QiConstValue):\n old_op = self.operations\n self.operations = lambda val: x.value - old_op(val)\n self.opcode = f\"{x} - {self.opcode_p}\"\n return self\n\n def move_mul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) * x.value\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n def move_rmul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value * old_op(val)\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n # These operations are not implemented for general QiExpressions\n # and are, therefore, left as they are.\n\n def __truediv__(self, x):\n if (isinstance(x, _QiConstValue) and x._given_value == 1) or x == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: 
old_op(val) / x\n self.opcode = f\"{self.opcode_p} / {x}\"\n return self\n\n def __rtruediv__(self, x):\n old_op = self.operations\n self.operations = lambda val: x / old_op(val)\n self.opcode = f\"{x} / {self.opcode_p}\"\n return self" }, { "identifier": "QiExpression", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiExpression:\n \"\"\"Superclass of every possible qicode expression.\"\"\"\n\n def __init__(self):\n self._contained_variables = QiVariableSet()\n self._type_info = _TypeInformation(self)\n\n @property\n def type(self):\n return self._type_info.type\n\n @staticmethod\n def _from(x):\n \"\"\"Creates an instance of QiExpression of the provided argument if possible.\"\"\"\n if isinstance(x, (float, int)):\n return _QiConstValue(x)\n elif isinstance(x, QiExpression):\n return x\n else:\n raise RuntimeError(f\"Can not create QiExpression from type {type(x)}.\")\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `accept`. This is a bug.\"\n )\n\n @property\n def contained_variables(self):\n \"\"\"Returns the variables used in this expression.\n QiExpression subclasses which contain variables (_QiCalcBase and _QiVariableBase) need to overwrite this.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `contained_variables`. This is a bug.\"\n )\n\n def _variables_to_container(self):\n if isinstance(self, _QiVariableBase):\n self._contained_variables.add(self)\n elif isinstance(self, _QiCalcBase):\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `_equal_syntax`. 
This is a bug.\"\n )\n\n # QiCellProperties are supposed to support some form of constant folding.\n # However, originally, instead of implementing this in an extra pass over\n # QiJob they were added to the QiCellProperty class.\n # In order to keep support for this limited form of constant folding\n # This logic was placed here.\n\n # (I'm not sure why we don't fold when both operands are QiCellProperty.\n # And I think the reason we don't fold tow _QiConstValue is that originally\n # They were just int/float and would \"fold\" implicitely when using any\n # math operator on them)\n\n # If anyone ever feels the need to improve this situation, I would\n # encourage them to implement a constant folding pass using the existing\n # dataflow infrastructure.\n # This pdf seems to give a nice short introduction into the topic:\n # http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/slides/15-02-constant-propagation-annotated.pdf\n\n def __add__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_add_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_radd_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.PLUS, x)\n\n def __radd__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_radd_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_add_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.PLUS, self)\n\n def __sub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_sub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rsub_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MINUS, x)\n\n def __rsub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rsub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_sub_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MINUS, self)\n\n def __mul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_mul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rmul_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MULT, x)\n\n def __rmul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rmul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_mul_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MULT, self)\n\n def __lshift__(self, x):\n return _QiCalcBase(self, QiOp.LSH, QiExpression._from(x))\n\n def __rshift__(self, x):\n return _QiCalcBase(self, QiOp.RSH, QiExpression._from(x))\n\n def __and__(self, x):\n return _QiCalcBase(self, QiOp.AND, QiExpression._from(x))\n\n def __rand__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.AND, self)\n\n def __or__(self, x):\n return _QiCalcBase(self, QiOp.OR, QiExpression._from(x))\n\n def __ror__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.OR, self)\n\n def __xor__(self, x):\n return _QiCalcBase(self, QiOp.XOR, 
QiExpression._from(x))\n\n def __rxor__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.XOR, self)\n\n def __invert__(self):\n return _QiCalcBase(self, QiOp.NOT, None)\n\n def __lt__(self, x):\n return QiCondition(self, QiOpCond.LT, QiExpression._from(x))\n\n def __le__(self, x):\n return QiCondition(self, QiOpCond.LE, QiExpression._from(x))\n\n def __gt__(self, x):\n return QiCondition(self, QiOpCond.GT, QiExpression._from(x))\n\n def __ge__(self, x):\n return QiCondition(self, QiOpCond.GE, QiExpression._from(x))\n\n def __eq__(self, x):\n return QiCondition(self, QiOpCond.EQ, QiExpression._from(x))\n\n def __ne__(self, x):\n return QiCondition(self, QiOpCond.NE, QiExpression._from(x))" }, { "identifier": "QiVariableSet", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiVariableSet:\n \"\"\"Class provides Set functionality for QiVariables.\n QiVariables overwrite comparison operations to build operation trees, to still allow comparisons ids are used.\n \"\"\"\n\n def __init__(self) -> None:\n self._var_list: List[\"_QiVariableBase\"] = []\n self._var_id_list: List[int] = []\n\n def __contains__(self, x):\n return x.id in self._var_id_list\n\n def add(self, x: \"_QiVariableBase\"):\n if x.id not in self._var_id_list:\n self._var_id_list.append(x.id)\n self._var_list.append(x)\n\n def update(self, var_set):\n for var in var_set:\n self.add(var)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self._var_list):\n var = self._var_list[self.n]\n self.n += 1\n return var\n else:\n raise StopIteration\n\n def __len__(self):\n return len(self._var_list)" }, { "identifier": "QiCondition", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCondition:\n \"\"\"Saves conditional comparisons.\n Can only be root node\"\"\"\n\n def __init__(\n self,\n val1: QiExpression,\n op: QiOpCond = QiOpCond.GT,\n val2: QiExpression = _QiConstValue(0),\n ) -> None:\n self._contained_variables = QiVariableSet()\n\n self.val1 = val1\n self.op = op\n self.val2 = val2\n\n from .qi_types import add_qi_condition_constraints\n\n add_qi_condition_constraints(op, val1, val2)\n\n @property\n def contained_variables(self):\n if len(self._contained_variables) == 0:\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n return self._contained_variables\n\n def accept(self, visitor):\n visitor.visit_condition(self)\n\n def __str__(self) -> str:\n return f\"{self.val1} {self.op.value} {self.val2}\"" }, { "identifier": "QiPulse", "path": "src/qiclib/code/qi_pulse.py", "snippet": "class QiPulse:\n \"\"\"\n Class to describe a single pulse.\n\n :param length: length of the pulse. This can also be a QiVariable for variable pulse lengths.\n :param shape: pulse shape (i.e. rect, gauss, ...)\n :param amplitude: relative amplitude of your pulse. This can also be a QiVariable for variable pulse amplitudes. NOT IMPLEMENTED\n :param phase: phase of the pulse in deg. (i.e. 
90 for pulse around y-axis of the bloch sphere)\n :param frequency: Frequency of your pulse, which is loaded to the PulseGen\n \"\"\"\n\n Type = Union[float, _QiVariableBase]\n\n def __init__(\n self,\n length: Union[float, _QiVariableBase, str],\n shape: Shape = ShapeLib.rect,\n amplitude: Union[float, _QiVariableBase] = 1.0,\n phase: float = 0.0,\n frequency: Union[float, QiExpression, None] = None,\n hold=False,\n ):\n from .qi_jobs import QiCellProperty\n\n if isinstance(length, str):\n mode = length.lower()\n if not mode in [\"cw\", \"off\"]:\n raise ValueError(\"QiPulse with str length only accepts 'cw' or 'off'.\")\n length = util.conv_cycles_to_time(1)\n if mode == \"cw\":\n hold = True\n else:\n amplitude = 0\n else:\n mode = \"normal\"\n\n self.mode = mode\n self.shape = shape\n self.amplitude = amplitude\n self.phase = phase\n self._length = length\n self.frequency = (\n QiExpression._from(frequency) if frequency is not None else None\n )\n self.hold = hold\n self.shift_phase = False\n\n if self.frequency is not None:\n self.frequency._type_info.set_type(\n QiType.FREQUENCY, _TypeDefiningUse.PULSE_FREQUENCY\n )\n\n self.var_dict = {}\n\n if isinstance(length, QiExpression):\n length._type_info.set_type(QiType.TIME, _TypeDefiningUse.PULSE_LENGTH)\n\n if isinstance(length, _QiVariableBase):\n self.var_dict[\"length\"] = length\n if shape != ShapeLib.rect:\n raise NotImplementedError(\n \"Variable pulse lengths are only supported for rectangular pulses\"\n )\n elif isinstance(length, QiCellProperty):\n pass\n elif util.conv_time_to_cycles(length) >= 2**32:\n raise RuntimeError(\n f\"Pulse length exceeds possible wait time, cycles {util.conv_time_to_cycles(length)}\"\n )\n\n if isinstance(amplitude, _QiVariableBase):\n raise NotImplementedError(\"Variable Amplitude not implemented yet\")\n # self.var_dict[\"amplitude\"] = amplitude\n\n def _are_variable_length(self, other) -> bool:\n return self.is_variable_length and other.is_variable_length\n\n def _are_same_length(self, other) -> bool:\n return (\n not isinstance(self._length, _QiVariableBase)\n and not isinstance(other._length, _QiVariableBase)\n and (self._length is other._length)\n )\n\n def _are_same_amplitude(self, other) -> bool:\n return (\n not isinstance(self.amplitude, _QiVariableBase)\n and not isinstance(other.amplitude, _QiVariableBase)\n and (self.amplitude == other.amplitude)\n )\n\n def __eq__(self, o: object) -> bool:\n equal_length: bool = isinstance(o, QiPulse) and (\n self._are_variable_length(o) or self._are_same_length(o)\n )\n equal_amplitude: bool = isinstance(o, QiPulse) and self._are_same_amplitude(o)\n\n return (\n isinstance(o, QiPulse)\n and equal_length\n and equal_amplitude\n and (self.hold == o.hold)\n and (self.shape == o.shape)\n and (self.phase == o.phase)\n and (\n self.frequency._equal_syntax(o.frequency)\n if self.frequency is not None and o.frequency is not None\n else self.frequency is o.frequency\n )\n )\n\n def __call__(self, samplerate: float, **variables: Any) -> np.ndarray:\n \"\"\"\n Returns the pulse envelope for a given frequency.\n :param samplerate: sample rate for calculating the envelope\n :param variables: the variables for the length/amplitude function, if any; legacy of qup_pulses\n\n :return: envelope of the pulse as numpy array.\n \"\"\"\n from .qi_jobs import QiCellProperty\n\n if self.is_variable_length:\n # variable pulses are hold till ended by another pulse, so no need to use correct length\n return np.array([self.amplitude] * 4)\n\n length = (\n self._length() if 
isinstance(self._length, QiCellProperty) else self._length\n )\n\n if (\n util.conv_time_to_cycles(length) >= 2**32\n ): # check value again, QiCellproperty might be used\n raise RuntimeError(\n f\"Pulse length exceeds possible wait time, cycles {util.conv_time_to_cycles(length)}\"\n )\n\n amplitude = self.amplitude\n timestep = 1.0 / samplerate\n\n if length < timestep / 2.0:\n if length != 0:\n logging.warning(\n \"A pulse is shorter than %f ns and thus is omitted.\", length * 1e09\n )\n\n return np.zeros(0)\n\n time_fractions = np.arange(0, length, timestep) / length\n envelope = amplitude * self.shape(time_fractions)\n\n return envelope\n\n @property\n def length(self):\n return self.var_dict.get(\"length\", self._length)\n\n @property\n def variables(self):\n return list(self.var_dict.values())\n\n @property\n def is_variable_length(self):\n return isinstance(self._length, _QiVariableBase)\n\n def _stringify_args(self) -> str:\n \"\"\"Determines non-default args to explicitly stringify\"\"\"\n arg_strings = []\n defaults = self.__init__.__defaults__\n\n if self.mode == \"normal\":\n arg_strings.append(str(self.length))\n else:\n arg_strings.append(f'\"{self.mode}\"')\n\n if self.shape != defaults[0]:\n arg_strings.append(f\"shape={self.shape}\")\n if not _equal(self.amplitude, defaults[1]) and self.mode != \"off\":\n arg_strings.append(f\"amplitude={self.amplitude}\")\n if not _equal(self.phase, defaults[2]):\n arg_strings.append(f\"phase={self.phase}\")\n if not _equal(self.frequency, defaults[3]):\n arg_strings.append(f\"frequency={self.frequency}\")\n\n return \", \".join(arg_strings)\n\n def _stringify(self) -> str:\n return f\"QiPulse({self._stringify_args()})\"" }, { "identifier": "QiCMContainedCellVisitor", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiCMContainedCellVisitor(QiCommandVisitor):\n \"\"\"Visitor to check which cells are used inside context managers.\"\"\"\n\n def __init__(self) -> None:\n self.contained_cells: Set[QiCell] = set()\n\n def visit_cell_command(self, cell_cmd):\n self.contained_cells.update(cell_cmd._relevant_cells)\n\n def visit_context_manager(self, context_manager):\n visitor = QiCMContainedCellVisitor()\n for item in context_manager.body:\n item.accept(visitor)\n\n context_manager._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_if(self, if_cm):\n visitor = QiCMContainedCellVisitor()\n for command in if_cm.body:\n command.accept(visitor)\n\n for command in if_cm._else_body:\n command.accept(visitor)\n\n if_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_parallel(self, parallel_cm):\n visitor = QiCMContainedCellVisitor()\n for cmd_list in parallel_cm.entries:\n for cmd in cmd_list:\n cmd.accept(visitor)\n\n parallel_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_variable_command(self, variable_cmd):\n self.contained_cells.update(variable_cmd._relevant_cells)\n\n def visit_sync_command(self, sync_cmd):\n self.contained_cells.update(sync_cmd._relevant_cells)\n\n def visit_asm_command(self, asm_cmd):\n self.contained_cells.update(asm_cmd._relevant_cells)\n\n def visit_mem_store_command(self, store_cmd):\n self.contained_cells.update(store_cmd._relevant_cells)" }, { "identifier": "QiResultCollector", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiResultCollector(QiCommandVisitor):\n def __init__(self):\n # If 
there are multiple QiResults used, we need to\n # simulate in which order they record.\n self.found_qi_results = set()\n # We also collect the recordings which contain the qi_results above\n self.corresponding_recordings = set()\n\n # Is a recording which saves to a QiResult within an if.\n # In these cases we can not necessarily simulate the recording order.\n self.recording_in_if = False\n\n self.if_else_depth = 0\n\n def visit_cell_command(self, cell_cmd):\n from .qi_jobs import cQiRecording, cQiPlayReadout\n\n if isinstance(cell_cmd, cQiPlayReadout) and cell_cmd.recording is not None:\n cell_cmd = cell_cmd.recording\n\n if isinstance(cell_cmd, cQiRecording):\n if self.if_else_depth > 0:\n self.recording_in_if = True\n\n self.found_qi_results.add(cell_cmd.save_to)\n self.corresponding_recordings.add(cell_cmd)\n\n def visit_if(self, if_cm):\n self.if_else_depth += 1\n\n for cmd in if_cm.body:\n cmd.accept(self)\n\n for cmd in if_cm.body:\n cmd.accept(self)\n\n self.if_else_depth -= 1\n\n def visit_parallel(self, parallel_cm):\n for cmd in parallel_cm.body:\n cmd.accept(self)\n\n def visit_for_range(self, for_range_cm):\n for cmd in for_range_cm.body:\n cmd.accept(self)" }, { "identifier": "QiVarInForRange", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiVarInForRange(QiCommandVisitor):\n \"\"\"Visitor used to visit QiCommands inside ForRange-Contextmanager. Raises error, if variable used in ForRange-Head is target of an Assign or Store\n command inside ForRange-Body. Additionally generates UserWarning when loop-variable is used inside Parallel-CM.\n \"\"\"\n\n def __init__(self, var) -> None:\n self.var = var\n\n def raise_exception(self):\n raise RuntimeError(\n \"Variable used in ForRange must not be used in internal Assign-Commands, var: \"\n + str(self.var)\n )\n\n def visit_cell_command(self, cell_cmd):\n from .qi_jobs import cQiStore\n\n if isinstance(cell_cmd, cQiStore):\n if id(cell_cmd.store_var) == id(self.var):\n self.raise_exception()\n\n def visit_context_manager(self, context_manager):\n for item in context_manager.body:\n item.accept(self)\n\n def visit_if(self, if_cm):\n for command in if_cm.body:\n command.accept(self)\n\n for command in if_cm._else_body:\n command.accept(self)\n\n def visit_parallel(self, parallel_cm):\n if self.var in parallel_cm._associated_variable_set:\n raise RuntimeError(\n \"Loop variable inside Parallel Context Manager might result in unexpected behaviour. 
\"\n \"Please unroll loop or change variable.\"\n )\n\n def visit_variable_command(self, variable_cmd):\n pass\n\n def visit_assign_command(self, assign_cmd):\n if id(assign_cmd.var) == id(self.var):\n self.raise_exception()\n\n def visit_sync_command(self, sync_cmd):\n pass" }, { "identifier": "QiProgramBuilder", "path": "src/qiclib/code/qi_prog_builder.py", "snippet": "class QiProgramBuilder:\n def __init__(\n self,\n cell_list: List[Any],\n cell_map: List[Any],\n command_list: List[Any],\n skip_nco_sync: bool = False,\n nco_sync_length: float = 0,\n ) -> None:\n from .qi_sequencer import Sequencer\n\n self.cell_seq_dict: Dict[Any, Sequencer] = {}\n self.result_boxes = []\n\n for cell, index in zip(cell_list, cell_map):\n self.cell_seq_dict[cell] = Sequencer(cell_index=index)\n\n for resultbox in cell._result_container.values():\n self.result_boxes.append(resultbox)\n\n self.cell_map = cell_map\n\n self.command_list = command_list\n\n self.skip_nco = skip_nco_sync\n self.nco_length = nco_sync_length\n\n @staticmethod\n def assign_cell_to_context_manager(commands: List[Any]):\n contained_cells_visitor = QiCMContainedCellVisitor()\n for command in commands:\n command.accept(contained_cells_visitor)\n\n @staticmethod\n def assign_variables_to_cell(commands: List[Any]):\n cell_to_variable_visitor = QiCmdVariableInspection()\n for command in reversed(commands):\n command.accept(cell_to_variable_visitor)\n\n QiProgramBuilder.assign_cell_to_context_manager(\n commands\n ) # run again, to ensure all Assignment statements are considered as well\n\n def build_program(self):\n for cell, sequencer in self.cell_seq_dict.items():\n cell.reset()\n\n if self.skip_nco is False:\n sequencer.add_nco_sync(self.nco_length)\n\n self.assign_cell_to_context_manager(self.command_list)\n\n self.assign_variables_to_cell(self.command_list)\n\n prog_builder = ProgramBuilderVisitor(self.cell_seq_dict, self.cell_map)\n\n for command in self.command_list:\n command.accept(prog_builder)\n\n for sequencer in self.cell_seq_dict.values():\n sequencer.end_of_program()\n\n return self.cell_seq_dict\n\n def get_all_variables(self) -> Dict[Any, Dict[Any, int]]:\n vars: Dict[Any, Dict[Any, int]] = {}\n for cell, seq in self.cell_seq_dict.items():\n for var in cell._relevant_vars:\n if var not in vars:\n vars[var] = {}\n vars[var][cell] = seq.get_var_register(var).adr\n return vars" }, { "identifier": "QiType", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiType(Enum):\n \"\"\"The type that a :class:`~qiclib.code.qi_var_definitions.QiExpression` has.\"\"\"\n\n UNKNOWN = 0\n TIME = 1\n \"\"\"Time values contain some amount of times (in cycles) that, for example, can be used in wait commands.\n They are specified using float (seconds) and are converted to cycles automatically.\n \"\"\"\n STATE = 2\n \"\"\"State values are the result of a recording.\"\"\"\n NORMAL = 3\n \"\"\"Freely usable integer values.\"\"\"\n FREQUENCY = 4\n \"\"\"\n Frequency values can be used in the Play/PlayReadout commands and, like TIME, are specified using floats.\n \"\"\"" }, { "identifier": "QiPostTypecheckVisitor", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiPostTypecheckVisitor(QiJobVisitor):\n \"\"\"Checks that every variable has an assigned type.\n The start and end values of ForRanges over time values are converted to cycles, because we only know with\n certainty whether they iterate over NORMAL or TIME values after the QiTypeFallbackVisitor has run.\n \"\"\"\n\n def __init__(self):\n pass\n\n def visit_for_range(self, 
for_range_cm):\n from qiclib.packages.constants import CONTROLLER_CYCLE_TIME\n from .qi_var_definitions import _QiConstValue, QiType\n from .qi_jobs import ForRange\n import numpy as np\n\n for_range_cm: ForRange = for_range_cm\n\n for_range_cm.var.accept(self)\n for_range_cm.start.accept(self)\n for_range_cm.end.accept(self)\n\n super().visit_for_range(for_range_cm)\n\n if for_range_cm.var.type == QiType.TIME:\n if isinstance(for_range_cm.start, _QiConstValue):\n if for_range_cm.start.value < 0:\n raise RuntimeError(\n f\"ForRange with negative time value ({for_range_cm.start._given_value}) are not allowed\"\n )\n\n if for_range_cm.end.value == 0:\n warnings.warn(\"End value of 0 will not be included in ForRange.\")\n\n # round to 11 decimals, if result is CONTROLLER_CYCLE_TIME then float modulo probably failed\n if (\n round(np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME), 11)\n != 0\n and round(\n np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME), 11\n )\n != CONTROLLER_CYCLE_TIME\n ):\n raise RuntimeError(\n f\"When using QiTimeVariables define step size as multiple of {CONTROLLER_CYCLE_TIME*1e9:.3g} ns.\"\n f\" (It is currently off by {np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME)*1e9:.3g} ns.)\"\n )\n elif (\n for_range_cm.var.type == QiType.FREQUENCY\n and isinstance(for_range_cm.end, _QiConstValue)\n and for_range_cm.end.value == 0\n ):\n warnings.warn(\"End value of 0 will not be included in ForRange.\")\n\n def visit_assign_command(self, assign_cmd):\n assign_cmd.var.accept(self)\n super().visit_assign_command(assign_cmd)\n\n def visit_constant(self, const):\n from .qi_var_definitions import QiType\n\n if const.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {const}.\")\n\n def visit_variable(self, var):\n from .qi_var_definitions import QiType\n\n if var.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {var}.\")\n\n def visit_calc(self, calc):\n from .qi_var_definitions import QiType\n\n super().visit_calc(calc)\n if calc.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {calc}.\")\n\n def visit_cell_property(self, cell_prop):\n if cell_prop.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {cell_prop}\")" }, { "identifier": "QiTypeFallbackVisitor", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiTypeFallbackVisitor(QiJobVisitor):\n \"\"\"Sets the the fallback type to NORMAL for _QiConstValue if they weren't given a type during QiJob construction.\n This is important for qicode like the following:\n\n .. code-block:: python\n\n with ForRange(x, 0, 10, 1):\n ...\n\n Here, x could theoretically be either of type TIME or NORMAL because int literals can have either type.\n However, we want this code to compile to with integer semantics which is why we need this visitor to run\n after job construction. 
(see QiJob __exit__ method).\n \"\"\"\n\n def visit_for_range(self, for_range_cm):\n from .qi_var_definitions import QiType\n\n if for_range_cm.var.type == QiType.UNKNOWN:\n for_range_cm.var._type_info.set_type(QiType.NORMAL, _TypeFallback.INT)\n\n super().visit_for_range(for_range_cm)\n\n def visit_constant(self, const):\n from .qi_var_definitions import QiType\n\n if const.type == QiType.UNKNOWN:\n if isinstance(const._given_value, float):\n const._type_info.set_type(QiType.TIME, _TypeFallback.FLOAT)\n else:\n assert isinstance(const._given_value, int)\n const._type_info.set_type(QiType.NORMAL, _TypeFallback.INT)" }, { "identifier": "_TypeDefiningUse", "path": "src/qiclib/code/qi_types.py", "snippet": "class _TypeDefiningUse(_TypeFact, Enum):\n VARIABLE_DEFINITION = 0\n VALUE_DEFINITION = 1\n SHIFT_EXPRESSION = 2\n PULSE_LENGTH = 3\n RECORDING_SAVE_TO = 4\n WAIT_COMMAND = 5\n RECORDING_OFFSET_EXPRESSION = 6\n PULSE_FREQUENCY = 7\n\n def to_error_message(self) -> str:\n return {\n _TypeDefiningUse.VARIABLE_DEFINITION: \"has been defined by the user as this type\",\n _TypeDefiningUse.VALUE_DEFINITION: \"has been defined by the user as this type\",\n _TypeDefiningUse.SHIFT_EXPRESSION: \"is used as right hand side of shift expression\",\n _TypeDefiningUse.PULSE_LENGTH: \"is used as length of pulse\",\n _TypeDefiningUse.RECORDING_SAVE_TO: \"is used as save_to of recording command\",\n _TypeDefiningUse.WAIT_COMMAND: \"is used as length in wait command\",\n _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION: \"is used as an recording offset\",\n _TypeDefiningUse.PULSE_FREQUENCY: \"is used as pulse frequency.\",\n }[self]" } ]
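A short illustrative sketch (not from the source repository; `qiclib.code` as the
import path is an assumption) of how the QiExpression operator overloads above
behave: arithmetic operators return _QiCalcBase nodes and comparisons return
QiCondition objects, so ordinary Python expressions describe an operation tree
that is compiled later rather than being evaluated eagerly.

from qiclib.code import QiJob, QiCells, QiIntVariable, If, Wait

with QiJob() as job:
    q = QiCells(1)
    x = QiIntVariable(1)
    expr = (x + 3) * 4   # _QiCalcBase(MULT) wrapping _QiCalcBase(PLUS); nothing is evaluated here
    with If(expr > 10):  # ">" builds a QiCondition, checked on hardware at run time
        Wait(q[0], 20e-9)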
import os
import json
import functools
import warnings
import numpy as np
import qiclib.packages.utility as util
from abc import abstractmethod
from typing import Dict, List, Callable, Optional, Union, Set, Any, Type
from ..hardware.taskrunner import TaskRunner
from ..experiment.qicode.data_provider import DataProvider
from ..experiment.qicode.data_handler import DataHandler
from .qi_seq_instructions import SequencerInstruction
from .qi_var_definitions import (
    _QiVariableBase,
    _QiCalcBase,
    _QiConstValue,
    QiCellProperty,
    QiExpression,
    QiVariableSet,
    QiCondition,
)
from .qi_pulse import QiPulse
from .qi_visitor import (
    QiCMContainedCellVisitor,
    QiResultCollector,
    QiVarInForRange,
)
from .qi_prog_builder import QiProgramBuilder
from .qi_types import (
    QiType,
    QiPostTypecheckVisitor,
    QiTypeFallbackVisitor,
    _TypeDefiningUse,
)
from .qi_types import (
    _TypeConstraintReasonQiCommand,
    _IllegalTypeReason,
    _add_equal_constraints,
)
from .analysis.qi_insert_mem_parameters import (
    insert_recording_offset_store_commands,
    insert_manipulation_pulse_frequency_store_commands,
    insert_readout_pulse_frequency_store_commands,
)
from .qi_simulate import Simulator
from ..experiment.qicode.base import QiCodeExperiment
from qiclib.experiment.qicode.base import _TaskrunnerSettings
from .qi_visitor import QiStringifyJob
17,259
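# Illustration (assumes a connected QiController `qic` and a matching QiSample
# `sample`, as in this module's docstrings): the cell_map handling validated
# in the code below lets job cell i drive sample cell cell_map[i], e.g.
#
#     job.run(qic, sample, averages=100, cell_map=[3, 5])
#
# maps q[0] -> sample[3] and q[1] -> sample[5], which are then translated to
# QiController cells via sample.cell_map.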
if cell_map is None: cell_map = list(range(len(self.cells))) str_map = ", ".join([f"q[{i}] -> sample[{m}]" for i, m in enumerate(cell_map)]) exp._job_representation = f"{self}\n\nmapped as {str_map} to\n\n{sample}" return exp def _prepare_experiment_params( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): if len(self.cells) > len(controller.cell): raise IndexError( f"This job requires {len(self.cells)} cells but only " f"{len(controller.cell)} are available in the QiController." ) if data_collection is None: if self._custom_processing is None: data_collection = "average" else: data_collection = "custom" # If float, convert averages to int averages = int(averages) if sample is None: sample = QiSample(len(controller.cell)) elif len(sample) < len(self.cells): raise ValueError( "Need to submit a QiSample with at least as many cells as the job " f"has ({len(self.cells)}), but only {len(sample)} provided." ) if cell_map is None: # Use the first cells of the sample cell_map = list(range(len(self.cells))) else: if len(cell_map) != len(self.cells): raise ValueError( "cell_map needs to have as many entries as the job has cells, but " f"{len(cell_map)} entries given and {len(self.cells)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(m < 0 or m >= len(sample) for m in cell_map): raise IndexError( "cell_map values can only point to valid indices within the passed" f" QiSample object, i.e. values between 0 and {len(sample) - 1}." ) # Translate cell_map from sample cells ("cells") to QiController cells cell_map = [sample.cell_map[c] for c in cell_map] if any(c < 0 or c >= len(controller.cell) for c in cell_map): raise ValueError( "The QiSample cell_map can only reference available QiController " f"cells, i.e. between 0 and {len(controller.cell) - 1}." ) self._build_program(sample, cell_map) for_range_list = [] for cell in self.cells: for_range_list.append(self.cell_seq_dict[cell]._for_range_list) return ( controller, self.cells, self._get_sequencer_codes(), averages, for_range_list, cell_map, self._var_reg_map, data_collection, use_taskrunner, ) def run( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): """executes the job and returns the results :param controller: the QiController on which the job should be executed :param sample: the QiSample object used for execution of pulses and extracts parameters for the experiment :param averages: the number of executions that should be averaged, by default 1 :param cell_map: A list containing the indices of the cells :param data_collection: the data_collection mode for the result, by default "average" :param use_taskrunner: if the execution should be handled by the Taskrunner Some advanced schemes and data_collection modes are currently only supported by the Taskrunner and not yet by a native control flow. """ exp = self.create_experiment( controller, sample, averages, cell_map, data_collection, use_taskrunner ) exp.run() def run_with_data_callback(self, on_new_data: Callable[[dict], None]): pass def run_streamed(self): pass def set_custom_data_processing( self, file: str, params: Optional[List] = None, converter: Optional[Callable[[List], List]] = None,
# Copyright © 2017-2023 Quantum Interface ([email protected]) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This is the main module of QiCode. Here, all important commands write QiPrograms are defined. """ class QiResult: """Result of an experiment. Can be accessed via :python:`job.cells[cell_index].data("result name")`. Where :python:`cells` denotes a :class:`QiCells` object and :python:`cell_index` an integer. The actual data can be retrieved as a numpy array using the :meth:`get` Method Example ------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. 
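    For example, cells are created through :class:`QiCells` inside a job
    (a minimal sketch):

    .. code-block:: python

        with QiJob() as job:
            q = QiCells(2)  # q[0] and q[1] are QiCell objects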
The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." ) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length) if isinstance(self._rec_length, QiExpression) else self._rec_length != length ): raise RuntimeError( f"Cell {self.cellID}: Multiple definitions of recording length used." 
) def add_readout_pulse(self, pulse: QiPulse): if pulse not in self.readout_pulses: self.readout_pulses.append(pulse) if len(self.readout_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.readout_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_readout_frequency(self): if self._initial_readout_freq is None: if len(self.readout_pulses) > 0: warnings.warn("Readout pulses without frequency given, using 30 MHz.") return 30e6 # Default frequency freq = self._initial_readout_freq return freq() if isinstance(freq, QiCellProperty) else freq @property def recording_length(self): """the length of the recording pulse""" if self._rec_length is not None: return ( self._rec_length() if isinstance(self._rec_length, QiCellProperty) else self._rec_length ) return 0 @property def initial_recording_offset(self): """the recording offset in seconds""" if self._initial_rec_offset is not None: return ( self._initial_rec_offset() if isinstance(self._initial_rec_offset, QiCellProperty) else self._initial_rec_offset ) return 0 def get_result_container(self, result: str) -> QiResult: if result in self._result_container: return self._result_container[result] # was already added else: box = QiResult(result) box._cell = self self._result_container[result] = box return box def add_variable(self, var: _QiVariableBase): self._relevant_vars.add(var) def get_number_of_recordings(self): return len(self._result_recording_order) def set_default_readout(self, pulse): pass def reset(self): for container in self._result_container.values(): container.data = [] def data( self, name: Optional[str] = None ) -> Union[Dict[str, np.ndarray], np.ndarray]: """ Returns the data after running an experiment. When calling this function without a name, i.e., calling :python:`cell.data()`, returns a dictionary containing the results as numpy arrays. When calling this function with a name, i.e., calling :python:`cell.data("result_name")`, returns the whole dictionary. :param name: The name of the data :return: A single result, or a dictionary of result names mapped to results. """ if name is None: result_dict = {} for key, container in self._result_container.items(): result_dict.update({key: container.get()}) return result_dict else: return self._result_container[name].get() def _resolve_properties(self, len_dict: Dict[QiCellProperty, Any]): keys = list(self._unresolved_property) missing_keys = self._unresolved_property.difference(len_dict.keys()) if missing_keys: raise RuntimeError( f"Cell {self.cellID}: Not all properties for job could be resolved. " f"Missing properties: {missing_keys}" ) for key in keys: self._properties[key] = len_dict[key] @property def has_unresolved_properties(self): return len(self._unresolved_property) > 0 def _get_unresolved_properties(self): return [ key for key in list(self._unresolved_property) if self._properties.get(key) is None ] def __str__(self) -> str: return f"QiCell({self.cellID})" class QiCells: """ QiCells encapsulates multiple :class`QiCell` objects. It is a list-like object where the individual cells can be accessed using the index operator, i.e. .. code-block:: python cells = QiCells(5) cell0: QiCell = cells[0] cell3: QiCell = cells[3] :param num: The number of cells to create :raises RuntimeError: When the :python:`QiCells` object is instantiated outside a :python:`QiJob` """ def __init__(self, num: int) -> None: if not isinstance(_QiJobReference, QiJob): raise RuntimeError( "QiCells can only be used within QiJob description. 
" + "If you try to create a sample object, use the new QiSample instead." ) self.cells = [QiCell(x) for x in range(num)] _QiJobReference._register_cells(self.cells) def __getitem__(self, key): return self.cells[key] def __len__(self): return len(self.cells) class QiSampleCell: """QiSampleCell is the representation of a single qubit/cell and its properties. All necessary parameters to perform experiments can be stored here. For this purpose, the QiSampleCell can be utilized as a dictionary with user-defined keys. """ def __init__(self, cellID: int, cells_ref: "QiSample"): self.cellID = cellID self._cells_ref = cells_ref self._relevant_vars: Set[_QiVariableBase] = set() self._properties: Dict[str, Any] = {} def __getitem__(self, key): return self._properties[key] def __setitem__(self, key, value): self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] @property def qic_cell(self): return self._cells_ref.cell_map[self.cellID] def get_properties(self): return self._properties.copy() def __str__(self) -> str: return f"QiSampleCell({self.cellID})" def _export(self): return {"properties": self.get_properties()} def _import(self, prop_dict, index): if prop_dict is None: warnings.warn( f"Imported JSON string does not contain 'properties' for cell[{index}]." ) return self._properties.update(prop_dict) class QiSample: """Representation of an experiment sample and its properties. Property keys can be arbitrary strings, and property values can be anything. Set the keys using :python:`sample["property_key"] = property_value` and get the values the same way, i.e., :python:`property_value = sample["property_key"]`. Note that this class **cannot** be instantiated within a :class:`QiJob`. Instead, it must be defined outside one. Accessing samples defined here within a QiJob is still possible, however, using the :class:`QiCell` object: .. code-block:: python sample: QiSample = ... qic: QiController = ... sample["t1"] = 100e-6 with QiJob() as job: q = QiCells(1) Wait(q[0], q[0]["t1"]) job.run(qic, sample) # Note that we pass the sample object here to make the value available in the job The :python:`QiSample` object is serializable to `JSON <https://www.json.org/>`_. Have a look at the :meth:`save` and :meth:`load` methods for more :param num: The number of cells/qubits this sample has. :param cell_map: On which QiController cells these are mapped, by default [0, 1, ..., num-1] :raises RuntimeError: When the Sample is used within a :class:`QiJob` """ def __init__(self, num: int, cell_map: Optional[List[int]] = None) -> None: self._cell_map = None if _QiJobReference is not None: raise RuntimeError( "QiSample can only be used outside of QiJob to define sample " "properties. Inside a QiJob, use QiCells as placeholder for the " "qubits/cells instead." 
) self.cells: List[QiSampleCell] = [] for x in range(num): self.cells.append(QiSampleCell(cellID=x, cells_ref=self)) self.cell_map = cell_map or list(range(num)) def __getitem__(self, key): return self.cells[key] def __len__(self): return len(self.cells) def __str__(self): return ( f"QiSample({len(self.cells)}, cell_map=[{','.join(map(str, self.cell_map))}]):\n" + "\n".join( [ f"[{i}]: {json.dumps(props['properties'], indent=2)}" for i, props in enumerate(self._export()["cells"]) ] ) ) def _arrange_for_controller(self) -> List[Optional[QiSampleCell]]: inverse: List[Optional[QiSampleCell]] = [None] * (max(self.cell_map) + 1) for cell, qi_cell_index in enumerate(self.cell_map): inverse[qi_cell_index] = self[cell] return inverse @property def cell_map(self): return self._cell_map @cell_map.setter def cell_map(self, cell_map): if len(cell_map) != len(self): raise ValueError( "cell_map needs to have as many entries as the there are cells, but " f"{len(cell_map)} entries given and {len(self)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(c < 0 for c in cell_map): raise ValueError("Cell indices inside cell_map cannot be negative!") self._cell_map = cell_map def _export(self): properties = [cell._export() for cell in self.cells] return {"cells": properties, "cell_map": self.cell_map} def _import(self, jsn_string): jsn_loaded = json.loads(jsn_string) self._evaluate_import(jsn_loaded.get("cells", None)) self.cell_map = jsn_loaded.get("cell_map", self.cell_map) def save(self, file_path: Union[str, os.PathLike], overwrite: bool = False): """ Save the sample to a file denoted by the :python:`file_path` argument in JSON format. :param file_path: Where to store the file :param overwrite: When true, allow overwriting an existing file. :raise FileExistsError: When overwrite is False and the file exists. """ mode = "w" if overwrite is True else "x" with open(file_path, mode, encoding="utf-8") as file: json.dump(self._export(), file) def load(self, file_path: Union[str, os.PathLike]): """ Loads the file at :python:`file_path` and assigns all properties of the loaded file to this :class:`QiSample` object. :param file_path: Where to look for the file """ with open(file_path, "r", encoding="utf-8") as file: self._import(file.read()) def _evaluate_import(self, sample): if sample is None: warnings.warn("Imported JSON string does not contain 'cells'.") return if len(sample) != len(self): raise ValueError( f"Imported JSON contains {len(sample)} sample cells but {len(self)} " "expected." 
) for i in range(0, len(self)): self.cells[i]._import(sample[i].get("properties", None), i) class _JobDescription: """Saves experiment descriptions and handles storage of commands""" def __init__(self): self._commands: List[QiCommand] = [] self._ContextStack: List[List[QiCommand]] = [] def __getitem__(self, key): return self._commands[key] def __len__(self): return len(self._commands) def add_command(self, command): """Checks current command for used cells and raises error, if cells are not defined for current QiJob""" if isinstance(command, QiCellCommand): if _QiJobReference != command.cell._job_ref: raise RuntimeError("Cell not defined for current job") self._commands.append(command) def open_new_context(self): """Saves current commands in a stack and clears command list""" self._ContextStack.append(self._commands.copy()) self._commands = [] def close_context(self) -> List[QiCommand]: """returns the current command list, and loads the commands from top of stack""" current_commands = self._commands.copy() self._commands = self._ContextStack.pop() return current_commands def reset(self): self._commands = [] self._ContextStack = [] class QiCellCommand(QiCommand): """ Cell commands are commands using only one cell, such as Play and Wait commands. :param cell: The target cell """ def __init__(self, cell: QiCell): super().__init__() self.cell = cell self._relevant_cells.add(cell) def accept(self, visitor, *input): return visitor.visit_cell_command(self, *input) class QiVariableCommand(QiCommand): """Base class of variable commands cQiDeclare and cQiAssign""" def __init__(self, var: _QiVariableBase): super().__init__() self.var = var def accept(self, visitor, *input): return visitor.visit_variable_command(self, *input) class cQiWait(QiCellCommand): """Command generated by :meth:`Wait`""" def __init__(self, cell, length: Union[QiExpression, QiCellProperty]): super().__init__(cell) self._length = length if isinstance(length, _QiVariableBase): self.add_associated_variable(length) elif isinstance(length, _QiCalcBase): for variable in length.contained_variables: self.add_associated_variable(variable) if isinstance(length, QiExpression): length._type_info.set_type(QiType.TIME, _TypeDefiningUse.WAIT_COMMAND) @property def length(self): return ( self._length() if isinstance(self._length, QiCellProperty) else self._length ) def _stringify(self) -> str: return f"Wait({self.cell}, {self._length})" class _cQiPlay_base(QiCellCommand): """Base class of Play commands. 
Saves pulses, trigger_index and adds pulse variables to associated variable set """ def __init__(self, cell, pulse: QiPulse): super().__init__(cell) self.pulse = pulse # default False; Set True for certain commands when unrolling a loop with TimingVariable == 1 cycle self._var_single_cycle_trigger = False for variable in self.pulse.variables: self.add_associated_variable(variable) # length of command might differ from pulse length self._length: Union[float, _QiVariableBase, QiCellProperty] = self.pulse.length self.trigger_index = 0 @property def length(self): return ( self._length if not isinstance(self._length, QiCellProperty) else self._length() ) @length.setter def length(self, value): self._length = value class cQiPlay(_cQiPlay_base): """Command generated by Play()""" def __init__(self, cell, pulse: QiPulse): super().__init__(cell, pulse) self.trigger_index = cell.add_pulse(pulse) def _stringify(self) -> str: return f"Play({self.cell}, {self.pulse._stringify()})" class cQiPlayFlux(_cQiPlay_base): pass class cQiPlayReadout(_cQiPlay_base): """Command generated by :meth:`PlayReadout`""" def __init__(self, cell, pulse) -> None: super().__init__(cell, pulse) self.recording: Union[None, cQiRecording] = None self.trigger_index = cell.add_readout_pulse(pulse) @property def length(self): length = ( self._length if not isinstance(self._length, QiCellProperty) else self._length() ) # if Recording is defined and length is not defined by variable, compare both lengths if isinstance(self.recording, cQiRecording) and not isinstance( self._length, _QiVariableBase ): return max(length, self.recording.length) return length @length.setter def length(self, value): self._length = value if isinstance(self.recording, cQiRecording): self.recording.length = value @property def uses_state(self): return self.recording is not None and self.recording.uses_state def _stringify(self) -> str: return f"PlayReadout({self.cell}, {self.pulse._stringify()})" class cQiRotateFrame(_cQiPlay_base): """Command generated by :meth:`RotateFrame`""" def __init__(self, cell, angle: float): # Negate phase because frame needs to be shifted in the opposite direction # than pulses -> want to shift the state on bloch sphere but shift the frame pulse = QiPulse(0, phase=-1 * angle) pulse.shift_phase = True # Special property to make phase offset persistant super().__init__(cell, pulse) self.trigger_index = cell.add_pulse(pulse) self.length = util.conv_cycles_to_time(1) # command needs exactly one cycle self.angle = angle def _stringify(self) -> str: return f"RotateFrame({self.cell}, {self.angle})" class cQiSync(QiCommand): """Command generated by :meth:`Sync`""" def __init__(self, cells: List[QiCell]): super().__init__() self._relevant_cells.update(cells) def accept(self, visitor, *input): return visitor.visit_sync_command(self, *input) def _stringify(self) -> str: return ( "Sync(" + ", ".join( [ f"{cell}" for cell in sorted(self._relevant_cells, key=lambda c: c.cellID) ] ) + ")" ) class cQiRecording(QiCellCommand): """Command generated by Recording()""" def __init__( self, cell: QiCell, save_to: Union[str, _QiVariableBase, None], state_to: Union[_QiVariableBase, None], length: Union[int, float, QiCellProperty], offset: Union[int, float, QiExpression], toggleContinuous: Optional[bool] = None, ): super().__init__(cell) self.result_box = None self.var = None if ( isinstance(length, QiExpression) and length.type == QiType.STATE or isinstance(offset, QiExpression) and offset.type == QiType.STATE ): raise RuntimeError("State variable can only 
be used at save_to parameter.") if isinstance(state_to, _QiVariableBase): state_to._type_info.set_type( QiType.STATE, _TypeDefiningUse.RECORDING_SAVE_TO ) self.add_associated_variable(state_to) self.var = state_to self.save_to = save_to assert not isinstance( save_to, QiResult ) # support for QiResult as parameter was removed. if isinstance(save_to, _QiVariableBase): # TODO This should be deprecated and turned into new result variable # to handle I/Q values instead if necessary -> consistency if self.var is not None: raise RuntimeError("Cannot pass variable to state_to and save_to.") save_to._type_info.set_type( QiType.STATE, _TypeDefiningUse.RECORDING_SAVE_TO ) self.add_associated_variable(save_to) self.var = save_to elif isinstance(save_to, str): self.result_box = cell.get_result_container( save_to ) # container might have been added to cell before self.save_to = save_to cell.add_recording_length(length) self._length = length if isinstance(self._length, QiExpression): self._length._type_info.set_type( QiType.TIME, _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION ) self._offset: QiExpression = QiExpression._from(offset) self._offset._type_info.set_type( QiType.TIME, _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION ) for var in self._offset.contained_variables: var._relevant_cells.add(cell) self.toggleContinuous = toggleContinuous self.follows_readout = False try: cmd = _QiJobReference.commands[-1] if ( isinstance(cmd, cQiPlayReadout) and cmd.cell == self.cell ): # Warning if previous cmd is readout but different cell self.follows_readout = True cmd.recording = self cmd._associated_variable_set.update(self._associated_variable_set) except IndexError: pass @property def uses_state(self): return len(self._associated_variable_set) > 0 @property def length(self): return ( self._length() if isinstance(self._length, QiCellProperty) else self._length ) @length.setter def length(self, value): self._length = value @property def offset(self): return ( self._offset() if isinstance(self._offset, QiCellProperty) else self._offset ) def _stringify_args(self) -> str: """Determines non-default args to explicitly stringify""" arg_strings = [str(self.cell), str(self._length)] if not ( isinstance(self._offset, _QiConstValue) and self._offset._given_value == 0 ): arg_strings.append(f"offset={self._offset}") if self.result_box is not None: arg_strings.append(f'save_to="{self.result_box.name}"') if self.var is not None: arg_strings.append(f"state_to={self.var}") if self.toggleContinuous is not None: arg_strings.append(f"toggleContinuous={self.toggleContinuous}") return ", ".join(arg_strings) def _stringify(self) -> str: return f"Recording({self._stringify_args()})" class cQiStore(QiCellCommand): """Command generated by :meth:`Store`""" def __init__(self, cell, store_var: _QiVariableBase, save_to: QiResult): super().__init__(cell) self.store_var = store_var self.save_to = save_to self.add_associated_variable(store_var) def _stringify(self) -> str: return f"Store({self.cell}, {self.store_var}, {self.save_to})" class cQiAssign(QiVariableCommand): """Command generated by :meth:`Assign`""" def __init__(self, dst: _QiVariableBase, value: Union[QiExpression, int, float]): if not isinstance(dst, _QiVariableBase): raise TypeError("Target of Assign can only be a QiVariable.") super().__init__(dst) self._value = QiExpression._from(value) dst._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.ASSIGN) _add_equal_constraints( QiType.NORMAL, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) 
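        # These equal-type constraints (above and below) require the assigned
        # value and the target variable to share a type for NORMAL and TIME
        # values; STATE targets were already rejected via add_illegal_type.
        # As a consequence, e.g. assigning a TIME expression to a NORMAL
        # variable fails during type checking.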
_add_equal_constraints( QiType.TIME, _TypeConstraintReasonQiCommand(cQiAssign), self._value, dst ) for variable in self.value.contained_variables: self.add_associated_variable(variable) @property def value(self): return self._value def accept(self, visitor, *input): return visitor.visit_assign_command(self, *input) def _stringify(self) -> str: return f"Assign({self.var}, {self._value})" class cQiDeclare(QiVariableCommand): """Command generated by initialization of new QiVariable""" def __init__(self, dst: _QiVariableBase) -> None: super().__init__(var=dst) def accept(self, visitor, *input): return visitor.visit_declare_command(self, *input) def _stringify(self) -> str: return f"v{self.var.str_id} = {self.var}" class cQiASM(QiCommand): def __init__(self, cells: QiCell, instr: SequencerInstruction, cycles: int): super().__init__() self._relevant_cells.add(cells) self.asm_instruction = instr self.cycles = cycles def accept(self, visitor, *input): return visitor.visit_asm_command(self, *input) def _stringify(self) -> str: return f"ASM({self.asm_instruction.get_riscv_instruction()})" class cQiMemStore(QiCommand): def __init__(self, cell: QiCell, addr: int, value): super().__init__() self._relevant_cells.add(cell) self.addr = addr self.value = value def accept(self, visitor, *input): return visitor.visit_mem_store_command(self, *input) def _stringify(self): cell_str = ", ".join(list(map(lambda x: f"{x}", self._relevant_cells))) return f"cQiMemStore({cell_str}, {self.addr}, {self.value})" class QiContextManager(QiCommand): """Base Class for If, Else, ForRange and Parallel. Defines functions for storing commands.""" def __init__(self) -> None: super().__init__() self.body: List[QiCommand] = [] def __enter__(self): _QiJobReference._open_new_context() return self def __exit__(self, exception_type, exception_value, traceback): self.body = _QiJobReference._close_context() _QiJobReference._add_command(self) def accept(self, visitor, *input): return visitor.visit_context_manager(self, *input) class If(QiContextManager): """ Add conditional logic to the program. If multiple cells are used inside the body, a synchronization between the cells takes place before the If. :param condition: The condition to check Example ------- .. code-block:: python with QiJob() as job: q = QiCells(1) x = QiIntVariable(1) with If(x > 1): ... # won't be executed The If statement is most commonly used to react to qubit states in real-time: .. code-block:: python from qiclib import jobs with QiJob() as job: q = QiCells(1) state = QiStateVariable() jobs.Readout(q[0], state_to=state) with If(state = 0): ... # Apply some conditional logic based on the qubit state """ def __init__(self, condition: Optional[QiCondition] = None): super().__init__() self._else_body: List[QiCommand] = [] if condition is None: raise RuntimeError("No QiCondition given") self.condition = condition for variable in condition.contained_variables: self.add_associated_variable(variable) def add_else_body(self, else_body): self._else_body = else_body.copy() def is_followed_by_else(self) -> bool: return len(self._else_body) != 0 def accept(self, visitor, *input): return visitor.visit_if(self, *input) def _stringify(self) -> str: return f"If({self.condition})" class Else(QiContextManager): """ Adds Conditional logic if the preceding :class:`If` command evaluates to false. :raises RuntimeError: When the preceeding command is not an :python:`If` command Example ------- .. 
code-block:: python from qiclib import jobs with QiJob() as job: q = QiCells(1) state = QiStateVariable() jobs.Readout(q[0], state_to=state) with If(state = 0): ... # Apply some conditional logic based on the qubit state with Else(): ... # State is 1 """ def __enter__(self): self.if_cmd = _QiJobReference.commands[-1] if not isinstance(self.if_cmd, If): raise RuntimeError("Else is not preceded by If") _QiJobReference._open_new_context() return self def __exit__(self, exception_type, exception_value, traceback): self.if_cmd.add_else_body(_QiJobReference._close_context()) class Parallel(QiContextManager): """Pulses defined in body are united in one trigger command.""" def __init__(self): super().__init__() self.entries: List[List[QiCommand]] = [] def __exit__(self, exception_type, exception_value, traceback): temp = _QiJobReference._close_context() self.body += temp # So visitors also find commands in Parallel blocks. self.entries.append(temp) containing_cells = QiCMContainedCellVisitor() for command in temp: if not isinstance( command, ( cQiPlay, cQiPlayReadout, cQiPlayFlux, cQiRotateFrame, cQiRecording, cQiWait, ), ): raise TypeError("Type not allowed inside Parallel()", command) if ( isinstance(command, (cQiRecording, cQiPlayReadout)) and command.uses_state ): raise RuntimeError("Can not save to state variable inside Parallel") try: if isinstance(command.length, _QiVariableBase): self._associated_variable_set.add(command.length) except KeyError: pass # length was QiCellProperty command.accept(containing_cells) self._relevant_cells.update(containing_cells.contained_cells) # If previous command is also parallel, combine by adding another parallel entry at previous command try: cmd = _QiJobReference.commands[-1] if isinstance(cmd, Parallel) and len(cmd.entries) < 2: cmd.entries.append(temp) cmd._associated_variable_set.update(self._associated_variable_set) else: _QiJobReference._add_command(self) except IndexError: _QiJobReference._add_command(self) class CmdTuple: def __init__(self, cmd: QiCommand, start: int, end: int, choke: bool = False): self.cmd = cmd self.start = start self.end = end self.choke_cmd = choke class TimeSlot: def __init__(self, cmd_tuples: List[Any], start, end): self.cmd_tuples: List[Parallel.CmdTuple] = cmd_tuples self.start: int = start self.end: int = end self.duration: float = 0.0 def _clear_wait_commands(self, cmd_tuples: List[CmdTuple]): """Clears cQiWait commands from cmd_tuples, if any trigger command is also in cmd_tuples""" contains_pulse = False for cmd_tuple in cmd_tuples: if isinstance(cmd_tuple.cmd, _cQiPlay_base): contains_pulse = True break return [ cmd_tuple for cmd_tuple in cmd_tuples if isinstance(cmd_tuple.cmd, _cQiPlay_base) or contains_pulse is False ] def _clear_choke_commands(self, cmd_tuples: List[CmdTuple]): """Clears choke commands, if at the same slot another Play or Readout command is present.""" contains_play = False contains_readout = False for cmd_tuple in cmd_tuples: if isinstance(cmd_tuple.cmd, cQiPlay) and cmd_tuple.choke_cmd is False: contains_play = True elif ( isinstance(cmd_tuple.cmd, cQiPlayReadout) and cmd_tuple.choke_cmd is False ): contains_readout = True if contains_play is False and contains_readout is False: return cmd_tuples cleared_tuples = [] for cmd_tuple in cmd_tuples: # if play command is present skip choke command for play if isinstance(cmd_tuple.cmd, cQiPlay): if cmd_tuple.choke_cmd is True and contains_play: continue # if PlayReadout command is present skip choke command for PlayReadout elif isinstance(cmd_tuple.cmd, 
cQiPlayReadout): if cmd_tuple.choke_cmd is True and contains_readout: continue cleared_tuples.append(cmd_tuple) return cleared_tuples def _create_time_slots(self, annotated_bodies: List[List[CmdTuple]], max_end: int): time_slot_list: List[Parallel.TimeSlot] = [] for start in range(0, max_end): time_slot = self.TimeSlot([], start, start) # find tuples with start time == start for cmd_list in annotated_bodies: for cmd_tuple in cmd_list: if cmd_tuple.start == start: time_slot.cmd_tuples.append(cmd_tuple) time_slot.end = max(cmd_tuple.end, time_slot.end) cmd_list.remove(cmd_tuple) break # next cmd_list # next start value, if nothing was found if len(time_slot.cmd_tuples) == 0: continue time_slot.cmd_tuples = self._clear_wait_commands(time_slot.cmd_tuples) time_slot.cmd_tuples = self._clear_choke_commands(time_slot.cmd_tuples) # Add Wait command, if previous end value < start try: prev_time_slot = time_slot_list[-1] if prev_time_slot.end < start: length = util.conv_cycles_to_time(start - prev_time_slot.end) new_wait = self.CmdTuple( cQiWait(list(self._relevant_cells)[0], length), start=prev_time_slot.end, end=start, ) time_slot_list.append( self.TimeSlot([new_wait], prev_time_slot.end, start) ) except IndexError: pass # Adjust previous end time, if previous.end > start try: prev_time_slot = time_slot_list[-1] prev_time_slot.end = min(prev_time_slot.end, start) except IndexError: pass time_slot_list.append(time_slot) # Add final wait, if previous.end != max_end try: prev_time_slot = time_slot_list[-1] if prev_time_slot.end < max_end: length = util.conv_cycles_to_time(max_end - prev_time_slot.end) new_wait = self.CmdTuple( cQiWait(list(self._relevant_cells)[0], length), start=prev_time_slot.end, end=max_end, ) time_slot_list.append( self.TimeSlot([new_wait], prev_time_slot.end, max_end) ) except IndexError: pass # calculate duration of time slot for slot in time_slot_list: slot.duration = util.conv_cycles_to_time(slot.end - slot.start) return time_slot_list def _generate_command_body(self, cell, sequencer): """Combines the parallel sequences to one command body.""" parallel_bodies: List[List[Parallel.CmdTuple]] = [] max_end = 0 # Generate annotated list of commands with start and end cycle for cmd_list in self.entries: commands: List[Parallel.CmdTuple] = [] start: int = 0 end: int = 0 for cmd in cmd_list: var_pulse = False if cell not in cmd._relevant_cells: continue # skip commands for other cells if isinstance(cmd.length, _QiVariableBase): reg = sequencer.get_var_register(cmd.length) if reg.valid is False or reg.value is None: raise RuntimeError( "Variable inside parallel not initialised or invalidated" ) length = reg.value if isinstance(cmd, (cQiPlay, cQiPlayReadout)): var_pulse = True else: length = util.conv_time_to_cycles(cmd.length, "ceil") if length == 0: continue # skip commands with length 0 if isinstance(cmd, cQiRecording) or ( isinstance(cmd, cQiPlayReadout) and isinstance(cmd.recording, cQiRecording) ): end += length + util.conv_time_to_cycles( sequencer.recording_delay, "ceil" ) else: end += length cmd_duration = self.CmdTuple(cmd, start, end) commands.append(cmd_duration) if var_pulse: # Add parallel choke command after current command, if variable length is used parallel_choke = [self.CmdTuple(cmd, end, end + 1, choke=True)] parallel_bodies.append(parallel_choke) max_end = max(end + 1, max_end) # +1 to account for choke command else: max_end = max(end, max_end) start = end parallel_bodies.append(commands) return self._create_time_slots(parallel_bodies, max_end) def accept(self, 
visitor, *input): return visitor.visit_parallel(self, *input) def _stringify(self) -> str: return "Parallel" class ForRange(QiContextManager): """Adds ForRange to program. If multiple cells are used inside body, a synchronisation between the cells is done before the ForRange as well as after the end of the body. If QiTimeVariable is used as var, loops starting at 0 are unrolled, to skip pulses/waits inside body using var as length. Raises exception if start, end and step are not set up properly.""" def __init__( self, var: _QiVariableBase, start: Union[_QiVariableBase, int, float], end: Union[_QiVariableBase, int, float], step: Union[int, float] = 1, ): super().__init__() if not isinstance(var, _QiVariableBase): raise RuntimeError( "Can only use QiVariables as control variable in ForRanges." ) start_expr = QiExpression._from(start) end_expr = QiExpression._from(end) step_expr = QiExpression._from(step) var._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE) start_expr._type_info.add_illegal_type( QiType.STATE, _IllegalTypeReason.FOR_RANGE ) end_expr._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE) step_expr._type_info.add_illegal_type( QiType.STATE, _IllegalTypeReason.FOR_RANGE ) _add_equal_constraints( QiType.TIME, _TypeConstraintReasonQiCommand(ForRange), var, start_expr, end_expr, step_expr, ) _add_equal_constraints( QiType.FREQUENCY, _TypeConstraintReasonQiCommand(ForRange), var, start_expr, end_expr, step_expr, ) _add_equal_constraints( QiType.NORMAL, _TypeConstraintReasonQiCommand(ForRange), var, start_expr, end_expr, step_expr, ) if not isinstance(start, _QiVariableBase) and not isinstance( end, _QiVariableBase ): if (start > end and step >= 0) or (start < end and step <= 0): raise ValueError("Definition of ForRange faulty") self.var = var self.start = start_expr self.end = end_expr self.step = step_expr self.add_associated_variable(var) if isinstance(start, _QiVariableBase): self.add_associated_variable(start) if start.id == var.id: raise RuntimeError("Loop variable can not be used as start value") if isinstance(end, _QiVariableBase): self.add_associated_variable(end) if end.id == var.id: raise RuntimeError("Loop variable can not be used as end value") def __exit__(self, exception_type, exception_value, traceback): super().__exit__(exception_type, exception_value, traceback) check_variable = QiVarInForRange(self.var) self.accept(check_variable) def accept(self, visitor, *input): return visitor.visit_for_range(self, *input) @property def is_step_positive(self) -> bool: return self.step > 0 def _stringify(self) -> str: return f"ForRange({self.var}, {self.start}, {self.end}, {self.step})" class QiVariable(_QiVariableBase): """Used as variables for use in program. If no type is provided as an argument, it will infer its type. """ def __init__( self, type: Union[QiType, Type[int], Type[float]] = QiType.UNKNOWN, value=None, name=None, ) -> None: if type == int: type = QiType.NORMAL elif type == float: type = QiType.TIME super().__init__(type, value, name=name) _add_cmd_to_job(cQiDeclare(self)) if self.value is not None: val = _QiConstValue(value) val._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION) _add_cmd_to_job(cQiAssign(self, val)) class QiJob: """ Container holding program, cells and qi_result containers for execution of program. 
Builds the job with its properties :param skip_nco_sync: if the NCO synchronization at the beginning should be skipped :param nco_sync_length: how long to wait after the nco synchronization """ def __init__( self, skip_nco_sync=False, nco_sync_length=0, ): self.qi_results: List[QiResult] = [] self.cells = [] self.skip_nco_sync = skip_nco_sync self.nco_sync_length = nco_sync_length self._description = _JobDescription() # Build self._performed_analyses = False self._build_done = False self._arranged_cells: List[Optional[QiCell]] = [] self._var_reg_map: Dict[_QiVariableBase, Dict[QiCell, int]] = {} # Run self._custom_processing = None self._custom_data_handler = None def __enter__(self): # pylint: disable=global-statement global _QiJobReference _QiJobReference = self return self def __exit__(self, exception_type, exception_value, traceback): for cmd in self.commands: cmd.accept(QiTypeFallbackVisitor()) for cmd in self.commands: cmd.accept(QiPostTypecheckVisitor()) _QiVariableBase.reset_str_id() # pylint: disable=global-statement global _QiJobReference _QiJobReference = None def _open_new_context(self): self._description.open_new_context() def _close_context(self): return self._description.close_context() def _add_command(self, command): self._description.add_command(command) @property def commands(self): """returns the commands of the job""" return self._description._commands def _register_cells(self, cells: List[QiCell]): if len(self.cells) > 0: raise RuntimeError("Can only register one set of cells at a QiJob.") self.cells = cells def _run_analyses(self): """ Executes needed (dataflow) analyses. These mutate the commands in QiJob by inserting additional instructions, therefore they should only run once, in order to avoid duplicate instructions. """ if not self._performed_analyses: insert_recording_offset_store_commands(self) insert_manipulation_pulse_frequency_store_commands(self) insert_readout_pulse_frequency_store_commands(self) self._performed_analyses = True def _simulate_recordings(self) -> Dict[Any, List[cQiRecording]]: """ Simulates the order cQiRecording executions. The result of this simulation is used to disentangle the recordings buffer and reassociate the individual recording results with their corresponding Recording commands. It might return more elements than are recorded during the real execution. """ # We first check if there are Recording commands at positions which we can not simulate. # i.e. If-Else, ForRanges with start or end that are neither constant nor other loop variables. # If this is the case we cannot simulate the order. visitor = QiResultCollector() for cmd in self.commands: cmd.accept(visitor) if len(visitor.found_qi_results) == 0: return {cell: [] for cell in self.cells} elif visitor.recording_in_if: raise RuntimeError("Recording command within If-Else statement.") # Next we simulate all loops and collect the respective Recording commands inside. 
simulator = Simulator(self.cells) simulator._simulate(self.commands) return simulator.cell_recordings def _build_program( self, sample: Optional[QiSample] = None, cell_map: Optional[List[int]] = None ): if sample is not None and cell_map is not None: sample = sample._arrange_for_controller() sample = [sample[m] if m < len(sample) else None for m in cell_map] if cell_map is None: cell_map = list(range(len(self.cells))) # TODO Check that this works with None and right order now self._resolve_properties(sample) for cell in self.cells: if len(cell._get_unresolved_properties()) > 0: raise RuntimeError( f"Unresolved properties {cell._get_unresolved_properties()} at cell {cell}" ) self._run_analyses() sim_result = self._simulate_recordings() for cell in self.cells: cell._result_recording_order = list( map( lambda x: x.result_box, filter(lambda x: x.result_box is not None, sim_result[cell]), ) ) prog_builder = QiProgramBuilder( self.cells, cell_map, self._description._commands.copy(), self.skip_nco_sync, self.nco_sync_length, ) self.cell_seq_dict = prog_builder.build_program() self._var_reg_map = prog_builder.get_all_variables() self._build_done = True def _get_sequencer_codes(self): return [ [ instr.get_riscv_instruction() for instr in self.cell_seq_dict[cell].instruction_list ] for cell in self.cells ] def create_experiment( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): exp = QiCodeExperiment( *self._prepare_experiment_params( controller, sample, averages, cell_map, data_collection, use_taskrunner ) ) if data_collection is None: if self._custom_processing is not None: exp._taskrunner.update(self._custom_processing) if self._custom_data_handler is not None: exp._data_handler_factory = DataHandler.get_custom_wrapper_factory( self._custom_data_handler ) # Provide a human-readable description of the execution if cell_map is None: cell_map = list(range(len(self.cells))) str_map = ", ".join([f"q[{i}] -> sample[{m}]" for i, m in enumerate(cell_map)]) exp._job_representation = f"{self}\n\nmapped as {str_map} to\n\n{sample}" return exp def _prepare_experiment_params( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): if len(self.cells) > len(controller.cell): raise IndexError( f"This job requires {len(self.cells)} cells but only " f"{len(controller.cell)} are available in the QiController." ) if data_collection is None: if self._custom_processing is None: data_collection = "average" else: data_collection = "custom" # If float, convert averages to int averages = int(averages) if sample is None: sample = QiSample(len(controller.cell)) elif len(sample) < len(self.cells): raise ValueError( "Need to submit a QiSample with at least as many cells as the job " f"has ({len(self.cells)}), but only {len(sample)} provided." ) if cell_map is None: # Use the first cells of the sample cell_map = list(range(len(self.cells))) else: if len(cell_map) != len(self.cells): raise ValueError( "cell_map needs to have as many entries as the job has cells, but " f"{len(cell_map)} entries given and {len(self.cells)} required!" ) if len(set(cell_map)) != len(cell_map): raise ValueError("Duplicate values not allowed in cell_map!") if any(m < 0 or m >= len(sample) for m in cell_map): raise IndexError( "cell_map values can only point to valid indices within the passed" f" QiSample object, i.e. 
values between 0 and {len(sample) - 1}." ) # Translate cell_map from sample cells ("cells") to QiController cells cell_map = [sample.cell_map[c] for c in cell_map] if any(c < 0 or c >= len(controller.cell) for c in cell_map): raise ValueError( "The QiSample cell_map can only reference available QiController " f"cells, i.e. between 0 and {len(controller.cell) - 1}." ) self._build_program(sample, cell_map) for_range_list = [] for cell in self.cells: for_range_list.append(self.cell_seq_dict[cell]._for_range_list) return ( controller, self.cells, self._get_sequencer_codes(), averages, for_range_list, cell_map, self._var_reg_map, data_collection, use_taskrunner, ) def run( self, controller, sample: Optional[QiSample] = None, averages: int = 1, cell_map: Optional[List[int]] = None, data_collection=None, use_taskrunner=False, ): """executes the job and returns the results :param controller: the QiController on which the job should be executed :param sample: the QiSample object used for execution of pulses and extracts parameters for the experiment :param averages: the number of executions that should be averaged, by default 1 :param cell_map: A list containing the indices of the cells :param data_collection: the data_collection mode for the result, by default "average" :param use_taskrunner: if the execution should be handled by the Taskrunner Some advanced schemes and data_collection modes are currently only supported by the Taskrunner and not yet by a native control flow. """ exp = self.create_experiment( controller, sample, averages, cell_map, data_collection, use_taskrunner ) exp.run() def run_with_data_callback(self, on_new_data: Callable[[dict], None]): pass def run_streamed(self): pass def set_custom_data_processing( self, file: str, params: Optional[List] = None, converter: Optional[Callable[[List], List]] = None,
mode: Union[TaskRunner.DataMode, str] = TaskRunner.DataMode.INT32,
0
2023-11-10 10:26:10+00:00
24k
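The qiclib record above defines a context-manager DSL (If/Else, Parallel, ForRange) and QiVariable for building pulse programs. A minimal usage sketch follows, composed only from names shown in the record itself (QiJob, QiCells, QiStateVariable, jobs.Readout, If, Else, QiVariable, ForRange); the sweep bounds are illustrative values, not taken from the source:

from qiclib import jobs

with QiJob() as job:
    q = QiCells(1)
    state = QiStateVariable()
    jobs.Readout(q[0], state_to=state)  # store the measured qubit state
    with If(state == 0):
        ...  # branch taken when the qubit was read out as 0
    with Else():
        ...  # branch for state 1
    # Sweep a time variable: passing float infers QiType.TIME (see QiVariable)
    delay = QiVariable(float)
    with ForRange(delay, 0, 100e-9, 20e-9):
        ...  # pulses/waits in the body may use `delay` as their length

Per the ForRange docstring in the record, a QiTimeVariable sweep starting at 0 is unrolled so that zero-length pulses/waits inside the body are skipped.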
fg320/DEASC
examples/12C_5x1_farm_dyn_tuning_wso_grouping_looping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). 
Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info" }, { "identifier": "WSOpt", "path": "deasc/wake_steering.py", "snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. 
Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random initial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1' and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict, optional) optimization method options dictionary.\n Default set to None.\n obj_function: (string, optional) objective function. 'Farm Power' available.\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized constraint bound.\n Default set to None.\n upp_bound_constr: (float) upper non-normalized constraint bound.\n Default set to None.\n by_row: (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows: (int) wind farm rows. Default set to None.\n cols: (int) wind farm columns. Default set to None.\n tuning_dynamic: (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)" }, { "identifier": "Tuning", "path": "deasc/tuning.py", "snippet": "class Tuning:\n \"\"\"\n Parameter tuning class for a low-fidelity model, where one or more\n parameters are tuned to higher fidelity power measurements. In particular,\n the RMSE is minimised for single turbine power measurements for a single or\n the sum of multiple atmospheric conditions. The wind farm layout is assumed fixed.\n \"\"\"\n\n def __init__(self,\n wf_model,\n variables_class_list,\n variables_names_list,\n variables_bounds_list,\n obj_func_name='RMSE',\n opt_method='SLSQP',\n opt_options=None\n ):\n \"\"\"\n Args\n ----\n wf_model : WfModel object (low-fidelity model)\n single WfModel object to tune\n variables_class_list: list of strings\n list of classes of parameters to tune, one per parameter\n variables_names_list : list of strings\n list of parameter names to tune\n variables_bounds_list : list of tuples\n list of parameter bounds, upper and lower limits for each parameter\n obj_func_name: string\n objective function. Default set to \"RMSE\"\n opt_method: string\n optimization method. Dafault set to \"SLSQP\" (\"TURBO_1\" also available)\n opt_options: dict\n optimizer options. 
Default set to None\n \"\"\"\n self.obj_func_dict = {'RMSE': self._tuning_rmse_function}\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-12,\n 'eps': 0.1},\n \"TURBO_1\": {\"n_init\": 2*len(variables_names_list),\n \"max_evals\": 100,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.tuning_optimizer_dict = {'SLSQP': self._tuning_optimizer_scipy,\n 'TURBO_1': self._tuning_optimizer_turbo_1}\n\n self.wf_model = wf_model\n self.variables_class_list = variables_class_list\n self.variables_names_list = variables_names_list\n self.variables_bounds_list = variables_bounds_list\n\n self.obj_func_name = obj_func_name\n self.obj_func = self.obj_func_dict[self.obj_func_name]\n self.opt_method = opt_method\n if opt_options == None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n else:\n self.opt_options = opt_options\n self._tuning_optimizer = self.tuning_optimizer_dict[self.opt_method]\n\n self.tuning_data_received = False\n self.tuning_conditions_received = False\n\n print(\"\\nInitialised parameter tuning\")\n print(\"%i parameters to tune\" % (len(self.variables_names_list)))\n print(\"%s optimization method\" % (self.opt_method))\n\n def tuning_data(self, data_power_list):\n \"\"\"\n Provide training higher-fidelity data for parameter tuning.\n Limited to power of each turbine for each condition ('RMSE')\n\n Args\n ----\n data_power_list : list of lists\n For each condition:\n list of turbines power output ('RMSE')\n \"\"\"\n self.tuning_data_power_list = data_power_list\n self.tuning_data_received = True\n pass\n\n def tuning_conditions(self,\n yaw_angles_list,\n wind_directions_list,\n wind_speeds_list,\n turbulence_intensities_list,\n wind_shear_list):\n \"\"\"\n Define the wind farm conditions (yaw and atmospheric)\n of the higher-fidelity data.\n\n Args\n ----\n yaw_angles_list : list of lists\n For each condition, list of turbines yaw_angles\n wind_directions_list: list\n For each condtion, wind direction\n wind_speeds_list: list\n For each condtion, wind speed\n turbulence_intensities_list: list\n For each condtion, wind direction\n wind_shear_list: list\n For each condtion, wind shear\n \"\"\"\n self.yaw_angles_list = yaw_angles_list\n self.wind_directions_list = wind_directions_list\n self.wind_speeds_list = wind_speeds_list\n self.turbulence_intensities_list = turbulence_intensities_list\n self.wind_shear_list = wind_shear_list\n self.tuning_conditions_received = True\n pass\n\n def tune_parameters(self):\n \"\"\"\n Tune specified parameters of a WfModel object.\n Requires higher-fidelity tuning data and the related conditions to be\n previously specified (refer to Tuning methods: tuning_data and tuning_conditions).\n\n Returns\n -------\n wf_model_tuned: WfModel object\n WfModel object with parameters tuned\n wf_model_dict_opt: dictionary\n tuned WfModel object dictionary\n \"\"\"\n # Double check tuning data and conditions have been specified\n if self.tuning_data_received is False:\n err_msg = \"Tuning data not specified. Use tuning_data method.\"\n raise Exception(err_msg)\n if self.tuning_conditions_received is False:\n err_msg = \"Tuning conditions not specified. 
Use tuning_conditions method.\"\n raise Exception(err_msg)\n\n # Extract original wf_model object dictionary and print its parameters\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.models_dict = floris_extract_models_dict(self.wf_model_dict_original)\n floris_print_params(self.wf_model_dict_original,\n self.models_dict,\n \"Original model parameters\")\n\n # Extract initial variable values and normalise them\n self.variables_init = self._wf_model_dict_to_variables(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list)\n self.variables_init_norm = self._norm_variables(self.variables_init,\n self.variables_bounds_list)\n\n # Normalize variable bounds\n tmp = self.variables_bounds_list\n (self.variables_bounds_list_norm,\n self.variables_low_bound_list_norm,\n self.variables_upp_bound_list_norm) = self._norm_variables_bounds_lists(tmp)\n\n # Minimisation of error | Extract optimal variables\n self._tuning_optimizer()\n self.opt_variables = self._unnorm_variables(self.opt_variables_norm,\n self.variables_bounds_list)\n\n # Apply tuned parameters (opt_variables) to wf_model and print them\n self.wf_model_dict_opt = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n self.opt_variables)\n self.wf_model = floris_param_change_object(self.wf_model, self.wf_model_dict_opt)\n floris_print_params(self.wf_model_dict_opt,\n self.models_dict,\n \"Optimal model parameters\")\n\n return self.wf_model, self.wf_model_dict_opt\n\n # %% Private methods\n\n def _wf_model_dict_to_variables(self, wf_model_dict, class_list, names_list):\n variables = []\n for i in range(len(names_list)):\n variable = floris_extract_parameter(wf_model_dict,\n class_list[i],\n names_list[i])\n variables.append(variable)\n return variables\n\n def _norm_variables(self, variables, variables_bounds_list):\n variables_norm = ([norm(variables[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables))])\n return variables_norm\n\n def _norm_variables_bounds_lists(self, variables_bounds_list):\n variables_bounds_list_norm = []\n variables_low_bound_list_norm = []\n variables_upp_bound_list_norm = []\n for i, variable_bounds in enumerate(variables_bounds_list):\n lower_bound_norm = norm(variable_bounds[0],\n variable_bounds[0],\n variable_bounds[1])\n upper_bound_norm = norm(variable_bounds[1],\n variable_bounds[0],\n variable_bounds[1])\n bound_norm_tuple = (lower_bound_norm, upper_bound_norm)\n variables_bounds_list_norm.append(bound_norm_tuple)\n variables_low_bound_list_norm.append(lower_bound_norm)\n variables_upp_bound_list_norm.append(upper_bound_norm)\n return (variables_bounds_list_norm,\n np.array(variables_low_bound_list_norm),\n np.array(variables_upp_bound_list_norm))\n\n def _unnorm_variables(self, variables_norm, variables_bounds_list):\n variables = ([unnorm(variables_norm[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables_norm))])\n return variables\n\n def _vars_to_wf_model_dict(self,\n wf_model_dict_original,\n variables_class_list,\n variables_names_list,\n variables):\n wf_model_dict_new = copy.deepcopy(wf_model_dict_original)\n for i in range(len(variables)):\n wf_model_dict_new = floris_param_change_object_dict(wf_model_dict_new,\n variables_class_list[i],\n variables_names_list[i],\n variables[i])\n return wf_model_dict_new\n\n def _tuning_optimizer_scipy(self):\n self.opt_results = 
minimize(self.obj_func,\n self.variables_init_norm,\n method=self.opt_method,\n bounds=self.variables_bounds_list_norm,\n options=self.opt_options)\n self.opt_variables_norm = self.opt_results.x\n\n def _tuning_optimizer_turbo_1(self):\n turbo_1 = Turbo1(f=self.obj_func,\n lb=self.variables_low_bound_list_norm,\n ub=self.variables_upp_bound_list_norm,\n **self.opt_options,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n self.opt_variables_norm = x_best\n\n def _tuning_rmse_function(self, variables_norm):\n\n # Unnorm variables, create new wf_model dictionary\n variables = self._unnorm_variables(variables_norm, self.variables_bounds_list)\n wf_model_dict_new = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n variables)\n\n # Create new wf_model object and reinitialize (atmospheric conditions set later)\n self.wf_model = floris_param_change_object(self.wf_model, wf_model_dict_new)\n\n rmse = 0\n for i in range(len(self.tuning_data_power_list)):\n\n # Calculate wind turbine power outputs with model to tune\n floris_reinitialise_atmosphere(self.wf_model,\n ws=self.wind_speeds_list[i],\n wd=self.wind_directions_list[i],\n ti=self.turbulence_intensities_list[i],\n shear=self.wind_shear_list[i])\n yaw_angles = np.array([float(item) for item in self.yaw_angles_list[i]])\n power_turbines = floris_calculate_turbine_power(self.wf_model, yaw_angles)\n\n # Calculate root mean squared error single condition\n error = 0\n for j in range(len(power_turbines)):\n error += (self.tuning_data_power_list[i][j]-power_turbines[j])**2\n rmse_single = error/len(power_turbines)\n\n # Calculate sum of root mean squared errors\n rmse += rmse_single\n\n return rmse" }, { "identifier": "GPWrap", "path": "deasc/gp.py", "snippet": "class GPWrap:\n \"\"\"\n Wrapper class to create, modify and visualise Gaussian Processes for dynamic parameter\n tuning. Currently limited to a single output GP.\n \"\"\"\n\n def __init__(self, parameter_class, parameter_name, dimensions):\n self.parameter_class = parameter_class\n self.parameter_name = parameter_name\n self.dimensions = dimensions\n \"\"\"\n Args\n ----\n parameter_class: string\n Parameter class of the optimal parameter to fit.\n parameter_name: string\n Name of the optimal parameter to fit.\n dimensions: integer\n Dimensions/inputs/variables of the GP.\n \"\"\"\n\n def GP_so(self, yaw_data, param_data, num_restarts=50, noise=0.05):\n \"\"\"\n Construct and returns a single-output (SO) GP for the given input dataset\n (optimal parameter for a given yaw configuration).\n\n Args\n ----\n yaw_data: list of lists\n list of input yaw configurations for which parameter has been tuned\n param_data: list of lists\n for each yaw configuration in yaw_data, list containing the optimal parameter\n num_restarts: int\n number of random starts of the GP hyperparameter tuning optimization\n noise: float\n noise in output prediction. 
Default is 0.05\n\n Returns\n -------\n m: GPy single-output Gaussian Process model\n \"\"\"\n # Sample check on argument dimension\n if len(yaw_data[0]) != self.dimensions:\n err_msg = (\"Yaw input and GP dimensions do not match\")\n raise Exception(err_msg)\n if len(param_data[0]) != 1:\n err_msg = (\"Single-output GPs only\")\n raise Exception(err_msg)\n\n # Data structure arguments\n yaw_data_GP = np.array(yaw_data)\n param_data_GP = np.array(param_data)\n\n # GP model\n kernel = GPy.kern.RBF(input_dim=self.dimensions, variance=1., lengthscale=1.)\n self.m = GPy.models.GPRegression(yaw_data_GP,\n param_data_GP,\n kernel,\n noise_var=noise)\n\n # Hyperparameter tuning\n self.m.optimize(optimizer=None, # Default lbfgsb\n start=None,\n messages=False,\n max_iters=1000)\n self.m.optimize_restarts(num_restarts=num_restarts)\n return self.m\n\n def GP_so_plot(self, parameter_range_plot, yaw_range_plot):\n \"\"\"\n Plot a single-output (SO) GP model. 1D and 2D plots are generated for each\n variable combination.\n\n Args\n ----\n parameter_range: tuple\n range of the optimal parameter to plot\n parameter_range: tuple\n range of the yaw variables to plot\n \"\"\"\n # Plotting library choice and defaults values\n GPy.plotting.change_plotting_library('matplotlib')\n GPy.plotting.matplot_dep.defaults.data_2d = {'s': 0,\n 'edgecolors': 'none',\n 'linewidth': 0.0,\n 'cmap': cm.get_cmap('hot'),\n 'alpha': 0.5}\n\n # 1D Plots\n if self.dimensions == 1:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(5, 2.5))\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n fig = self.m.plot(figure=figure,\n col=1,\n row=1,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=True)\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n figsize = (5*n_cuts, 2.5*self.dimensions)\n figure = GPy.plotting.plotting_library().figure(self.dimensions,\n n_cuts,\n figsize=figsize)\n\n for dim_idx in range(self.dimensions):\n for i, slice_single in zip(range(n_cuts), slices):\n title = \"GP %s - $\\gamma_{others}$\" \\\n \"%.1f $^{\\circ}$\" % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (dim_idx+1)\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n inputs = []\n for j in range(self.dimensions):\n if j == dim_idx:\n pass\n else:\n inputs.append((j, slice_single))\n fig = self.m.plot(figure=figure,\n col=(i+1),\n row=(dim_idx+1),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=False)\n\n # 2D Plots\n # Countours are fine ##\n # Data points (training) plotted are off ##\n # double checked with GP and training database ##\n if self.dimensions == 1:\n pass\n elif self.dimensions == 2:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(3, 2.5))\n\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$\\gamma_{2}$ [deg]'\n\n fig = self.m.plot(figure=figure,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n plot_rows = self.dimensions-1\n plot_cols = self.dimensions-1\n combinations = list(itertools.combinations(\n list(range(0, 
self.dimensions)), 2))\n\n figsize = (3*plot_cols*len(slices), 2.5*plot_rows)\n figure = GPy.plotting.plotting_library().figure(plot_rows,\n plot_cols*len(slices),\n figsize=figsize)\n for i, slice_single in zip(range(n_cuts), slices):\n for comb_idx, comb in enumerate(combinations):\n title = 'GP %s - $\gamma_{others}$' \\\n '%.1f $^{\circ}$' % (self.parameter_name, slice_single)\n xlabel = '$\gamma_{%i}$ [deg]' % (comb[0]+1)\n ylabel = '$\gamma_{%i}$ [deg]' % (comb[1]+1)\n inputs = []\n for j in range(self.dimensions):\n if j in comb:\n pass\n else:\n inputs.append((j, slice_single))\n\n fig = self.m.plot(figure=figure,\n col=(comb[0]+1+plot_cols*i),\n row=(comb[1]),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))" }, { "identifier": "TuningDyn_Grouping", "path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Grouping(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"Class for dynamic parameter tuning with grouping of turbines within a wind farm.\"\"\"\n\n def __init__(self, param_class, param_name, tuning_groups, GP_model):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_groups: (list of lists) list of turbine groups included in the tuning. In\n each list, specify the turbines in the group.\n GP_model: (GPy object) GP model with len(tuning_groups) input dimensions.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_groups\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n # GP dimension check\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Grouping info\n self.tuning_groups = tuning_groups\n self.grouping_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return [x for sublist in self.tuning_variables for x in sublist]\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._tuning_groups_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf_model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_groups(self.tuning_groups, yaw_angles)\n mu, var, = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def set_yaw_groups(self, yaw_angles):\n \"\"\"\n Force yaw angles of turbines in tuning groups to be equal in the wake\n steering optimisation.\n\n Args\n ----\n yaw_angles: (np.ndarray) yaw angles of all turbines in the
wind farm.\n\n Returns\n -------\n yaw_angles_grouped: (np.ndarray) yaw angles of all turbines in the wind farm with\n equal yaw angles in each turbine group.\n \"\"\"\n return self._set_yaw_groups(yaw_angles)" }, { "identifier": "TuningDyn_Looping_Turbine", "path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Looping_Turbine(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"\n Class for dynamic parameter tuning with the looping approach of turbines within\n a wind farm.\n \"\"\"\n\n def __init__(self, param_class, param_name, tuning_turbine, GP_model, wf_pow_noyaw):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_turbine: (list) single-element list containing the turbine included in the tuning.\n GP_model: (GPy object) GP model with a single input dimension.\n wf_pow_noyaw: (float) value of the wind farm power without any yaw applied,\n usually extracted from the previous grouping optimisation to refine.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_turbine\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Looping info\n self.wf_pow_noyaw = wf_pow_noyaw\n self.tuning_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return self.tuning_variables\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._looping_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf_model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_turbines(self.tuning_turbines, yaw_angles)\n mu, var, = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def _looping_check(self, wso_obj):\n if len(self.tuning_variables) != 1:\n err_msg = \"While looping, only a single turbine can be tuned.\"\n raise Exception(err_msg)\n if len(wso_obj.variables) != 1:\n err_msg = \"While looping, only a single turbine can be optimised.\"\n raise Exception(err_msg)" }, { "identifier": "floris_extract_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()" }, { "identifier": "floris_extract_parameter", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_parameter(wf_model_dict, param_class, param_name):\n \"\"\"Extract and return the current parameter value of a FLORIS object parameter.\"\"\"\n models_dict = floris_extract_models_dict(wf_model_dict)\n return
wf_model_dict['wake'][param_class][models_dict[param_class]][param_name]" }, { "identifier": "floris_param_change_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n (wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new" }, { "identifier": "floris_param_change_object", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model" } ]
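Taken together, the utils_floris helpers in this context reduce to one pattern: deep-copy the FLORIS configuration dictionary, then read or write wake.<param_class>.<model>.<param_name>. The sketch below reproduces that pattern on a toy dictionary; the "jensen" and "we" keys are illustrative stand-ins, and the real helpers obtain the dictionary from wf_model.interface.floris.as_dict() and resolve the model key via floris_extract_models_dict.

import copy

# Toy stand-in for a FLORIS configuration dictionary (illustrative keys).
toy_config = {"wake": {"wake_velocity_parameters": {"jensen": {"we": 0.05}}}}

def extract_parameter(config, param_class, model_name, param_name):
    """Read a nested wake parameter, mirroring floris_extract_parameter."""
    return config["wake"][param_class][model_name][param_name]

def change_parameter(config, param_class, model_name, param_name, value):
    """Return a modified deep copy, mirroring floris_param_change_object_dict."""
    new_config = copy.deepcopy(config)
    new_config["wake"][param_class][model_name][param_name] = value
    return new_config

updated = change_parameter(toy_config, "wake_velocity_parameters", "jensen", "we", 0.08)
assert extract_parameter(toy_config, "wake_velocity_parameters", "jensen", "we") == 0.05
assert extract_parameter(updated, "wake_velocity_parameters", "jensen", "we") == 0.08

The deep copy is what lets the tuning loop keep an untouched original dictionary while repeatedly producing tuned variants of it.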
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import Tuning from deasc import GPWrap from deasc import TuningDyn_Grouping from deasc import TuningDyn_Looping_Turbine from deasc.utils_floris import ( floris_extract_object_dict, floris_extract_parameter, floris_param_change_object_dict, floris_param_change_object )
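The Tuning class imported here exposes the TuRBO backend shown earlier in this context (_tuning_optimizer_turbo_1): after turbo_1.optimize(), the best point is recovered with a plain argmin over the evaluated points. Below is a hedged sketch of that extraction, with synthetic arrays standing in for Turbo1's X and fX attributes.

import numpy as np

# Synthetic stand-ins for Turbo1's evaluated points (X) and observed
# objective values (fX, a column vector) after optimize().
rng = np.random.default_rng(1)
X = rng.uniform(0.0, 1.0, size=(20, 3))
fX = np.sum((X - 0.5) ** 2, axis=1, keepdims=True)

# Best-point extraction, as in _tuning_optimizer_turbo_1.
index_best = np.argmin(fX)  # flat index of the smallest observed value
f_best, x_best = fX[index_best], X[index_best, :]
print(f"best objective {float(f_best):.4f} at x = {x_best}")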
15,170
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model)
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model)
wf_model_dict = floris_param_change_object_dict(wf_model_dict,
8
2023-11-10 18:13:27+00:00
24k
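GPWrap.GP_so in the context above is a compact GPy recipe: an RBF kernel, a GPRegression model with fixed observation noise, and restarted hyperparameter optimisation. The sketch below is a minimal runnable version with synthetic one-dimensional data; the real inputs are yaw configurations and tuned parameter values, and GP_so defaults to 50 restarts rather than the 5 used here.

import numpy as np
import GPy

# Synthetic (yaw, optimal parameter) pairs; the relationship is invented.
yaw = np.linspace(-25.0, 25.0, 11).reshape(-1, 1)
param = 0.05 + 0.001 * np.abs(yaw)

kernel = GPy.kern.RBF(input_dim=1, variance=1.0, lengthscale=1.0)
model = GPy.models.GPRegression(yaw, param, kernel, noise_var=0.05)
model.optimize(messages=False, max_iters=1000)  # default L-BFGS-B, as in GP_so
model.optimize_restarts(num_restarts=5)

mu, var = model.predict_noiseless(np.array([[10.0]]))
print(f"predicted parameter at 10 deg yaw: {mu[0][0]:.4f}")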
PlaxtonFlarion/NexaFlow
nexaflow/skills/alynex.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None\n) -> typing.List[float]:\ndef get_current_frame_id(video_cap: cv2.VideoCapture) -> int:\ndef get_current_frame_time(video_cap: cv2.VideoCapture) -> float:\ndef imread(img_path: str, *_, **__) -> np.ndarray:\ndef get_frame_time(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> float:\ndef get_frame_count(video_cap: cv2.VideoCapture) -> int:\ndef get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:\ndef get_frame(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> np.ndarray:\ndef turn_grey(old: np.ndarray) -> np.ndarray:\ndef turn_binary(old: np.ndarray) -> np.ndarray:\ndef turn_hog_desc(old: np.ndarray) -> np.ndarray:\ndef turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:\ndef turn_blur(old: np.ndarray) -> np.ndarray:\ndef sharpen_frame(old: np.ndarray) -> np.ndarray:\ndef calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef compress_frame(\n old: np.ndarray,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n not_grey: bool = None,\n interpolation: int = None,\n *_,\n **__,\n) -> np.ndarray:\ndef get_timestamp_str() -> str:\ndef np2b64str(frame: np.ndarray) -> str:\ndef fps_convert(\n target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None\n) -> int:\ndef match_template_with_object(\n template: np.ndarray,\n target: np.ndarray,\n engine_template_cv_method_name: str = None,\n **kwargs,\n) -> typing.Dict[str, typing.Any]:\ndef match_template_with_path(\n template: str, target: np.ndarray, **kwargs\n) -> typing.Dict[str, typing.Any]:\ndef show_progress(total: int, color: int, title: str) -> tqdm:\ndef draw_line(image_path: str, save_path: str = None):" }, { "identifier": "Report", "path": "nexaflow/skills/report.py", "snippet": "class Report(object):\n\n __lock: threading.Lock = threading.Lock()\n __initialized: bool = False\n __instance = None\n __init_var = None\n\n def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n with cls.__lock:\n if cls.__instance is None:\n cls.__instance = super(Report, cls).__new__(cls)\n cls.__init_var = (args, kwargs)\n return cls.__instance\n\n def __init__(self, total_path: str):\n if not self.__initialized:\n self.__initialized = True\n\n self.clock: Any = lambda: time.strftime(\"%Y%m%d%H%M%S\")\n\n self.__title: str = \"\"\n self.__query: str = \"\"\n self.query_path: str = \"\"\n self.video_path: str = \"\"\n self.frame_path: str = \"\"\n self.extra_path: str = \"\"\n\n self.range_list: list[dict] = []\n self.total_list: list[dict] = []\n\n self.total_path = os.path.join(total_path, f\"Nexa_{self.clock()}_{os.getpid()}\", \"Nexa_Collection\")\n # self.total_path = \"/Users/acekeppel/PycharmProjects/NexaFlow/report/Nexa_20230822223025/Nexa_Collection\"\n os.makedirs(self.total_path, exist_ok=True)\n\n self.reset_path = os.path.join(os.path.dirname(self.total_path), \"Nexa_Recovery\")\n os.makedirs(self.reset_path, exist_ok=True)\n log_papers = os.path.join(self.reset_path, \"nexaflow.log\")\n logger.add(log_papers, format=FORMAT, level=\"DEBUG\")\n\n @property\n def proto_path(self) -> str:\n return 
os.path.join(self.query_path, self.query)\n\n @property\n def title(self):\n return self.__title\n\n @title.setter\n def title(self, title: str):\n self.__title = title\n self.query_path = os.path.join(self.total_path, self.title)\n os.makedirs(self.query_path, exist_ok=True)\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {self.title} ✪✪✪✪✪✪✪✪✪✪\\n\")\n\n @title.deleter\n def title(self):\n del self.__title\n\n @property\n def query(self):\n return self.__query\n\n @query.setter\n def query(self, query: str):\n self.__query = query\n self.video_path = os.path.join(self.query_path, self.query, \"video\")\n self.frame_path = os.path.join(self.query_path, self.query, \"frame\")\n self.extra_path = os.path.join(self.query_path, self.query, \"extra\")\n os.makedirs(self.video_path, exist_ok=True)\n os.makedirs(self.frame_path, exist_ok=True)\n os.makedirs(self.extra_path, exist_ok=True)\n logger.info(f\"Start -> {self.query}\")\n\n @query.deleter\n def query(self):\n del self.__query\n\n def load(self, inform: Optional[Dict[str, Union[str | Dict]]]) -> None:\n if inform:\n self.range_list.append(inform)\n logger.info(f\"End -> {self.query}\\n\")\n\n def create_report(self) -> None:\n\n def start_create(result):\n handler_list = []\n query = result.get(\"query\", \"TimeCost\")\n stage = result.get(\"stage\", {\"start\": 1, \"end\": 2, \"cost\": \"0.00000\"})\n frame = result.get(\"frame\", \"\")\n extra = result.get(\"extra\", \"\")\n proto = result.get(\"proto\", \"\")\n\n image_list = []\n for image in os.listdir(frame):\n image_src = os.path.join(query, \"frame\", image)\n image_ids = re.search(r\"\\d+(?=_)\", image).group()\n timestamp = float(re.search(r\"(?<=_).+(?=\\.)\", image).group())\n image_list.append(\n {\n \"src\": image_src,\n \"frames_id\": image_ids,\n \"timestamp\": f\"{timestamp:.5f}\"\n }\n )\n image_list.sort(key=lambda x: int(x[\"frames_id\"]))\n\n extra_list = []\n for ex in os.listdir(extra):\n extra_src = os.path.join(query, \"extra\", ex)\n extra_idx = ex.split(\"(\")[0]\n extra_list.append(\n {\n \"src\": extra_src,\n \"idx\": extra_idx\n }\n )\n extra_list.sort(key=lambda x: int(x[\"idx\"].split(\"(\")[0]))\n\n handler_list.append(\n {\n \"query\": query,\n \"stage\": stage,\n \"image_list\": image_list,\n \"extra_list\": extra_list,\n \"proto\": os.path.join(query, os.path.basename(proto))\n }\n )\n\n return handler_list\n\n if len(self.range_list) > 0:\n if len(self.range_list) == 1:\n images_list = start_create(self.range_list[0])\n else:\n with ThreadPoolExecutor() as executor:\n future = executor.map(start_create, self.range_list)\n images_list = [i for f in future for i in f]\n\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_main.html\")\n\n html = template.render(title=self.title, images_list=images_list)\n report_html = os.path.join(self.query_path, f\"{self.title}.html\")\n with open(file=report_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成聚合报告: {os.path.basename(report_html)}\")\n\n cost_list = [cost['stage']['cost'] for cost in images_list]\n href_path = os.path.join(\n os.path.basename(self.total_path),\n self.title,\n os.path.basename(report_html)\n )\n single = {\n \"case\": self.title,\n \"cost_list\": cost_list,\n \"avg\": f\"{sum(map(float, cost_list)) / len(cost_list):.5f}\",\n \"href\": href_path\n }\n logger.debug(\"Recovery: \" + json.dumps(single, ensure_ascii=False))\n self.total_list.append(single)\n 
self.range_list.clear()\n else:\n logger.info(\"没有可以聚合的报告 ...\")\n\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {self.title} ✪✪✪✪✪✪✪✪✪✪\\n\\n\")\n\n def create_total_report(self) -> None:\n if len(self.total_list) > 0:\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n html = template.render(report_time=report_time, total_list=self.total_list)\n\n total_html_path = os.path.join(os.path.dirname(self.total_path), \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html_path}\\n\\n\")\n self.total_list.clear()\n else:\n logger.info(\"没有可以汇总的报告 ...\")\n\n @staticmethod\n def reset_report(file_name: str) -> None:\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n\n with open(\n file=os.path.join(file_name, \"Nexa_Recovery\", \"nexaflow.log\"),\n mode=\"r\", encoding=\"utf-8\"\n ) as f:\n log_restore = re.findall(r\"(?<=Recovery: ).*}\", f.read())\n total_list = [json.loads(file) for file in log_restore]\n html = template.render(report_time=report_time, total_list=total_list)\n\n total_html_path = os.path.join(file_name, \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html_path}\\n\\n\")\n\n @staticmethod\n def merge_report(merge_list: List[str], loader_merge_loc: str) -> None:\n merge_path = os.path.join(\n os.path.dirname(os.path.dirname(merge_list[0])),\n \"Merge_Nexa_\" + time.strftime(\"%Y%m%d%H%M%S\"),\n \"Nexa_Collection\"\n )\n os.makedirs(merge_path, exist_ok=True)\n log_restore = []\n for merge in merge_list:\n logs = os.path.join(os.path.dirname(merge), \"Nexa_Recovery\", \"nexaflow.log\")\n with open(file=logs, mode=\"r\", encoding=\"utf-8\") as f:\n log_restore.extend(re.findall(r\"(?<=Recovery: ).*}\", f.read()))\n shutil.copytree(\n merge, merge_path, dirs_exist_ok=True,\n ignore=shutil.ignore_patterns(\"NexaFlow.html\", \"nexaflow.log\")\n )\n\n loader = FileSystemLoader(loader_merge_loc)\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n total_list = [json.loads(file) for file in log_restore]\n html = template.render(report_time=report_time, total_list=total_list)\n\n total_html_path = os.path.join(os.path.dirname(merge_path), \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"合并汇总报告: {total_html_path}\\n\\n\")\n\n @staticmethod\n async def ask_create_report(major_loc, title, total_path, query_path, range_list):\n\n async def handler_inform(result):\n handler_list = []\n query = result.get(\"query\", \"TimeCost\")\n stage = result.get(\"stage\", {\"start\": 1, \"end\": 2, \"cost\": \"0.00000\"})\n frame = result.get(\"frame\", \"\")\n extra = result.get(\"extra\", \"\")\n proto = result.get(\"proto\", \"\")\n\n async def handler_frame():\n handler_image_list = []\n for image in os.listdir(\n os.path.join(\n query_path, query, os.path.basename(frame)\n )\n ):\n image_src = os.path.join(query, \"frame\", image)\n image_ids = re.search(r\"\\d+(?=_)\", image).group()\n 
timestamp = float(re.search(r\"(?<=_).+(?=\\.)\", image).group())\n handler_image_list.append(\n {\n \"src\": image_src,\n \"frames_id\": image_ids,\n \"timestamp\": f\"{timestamp:.5f}\"\n }\n )\n handler_image_list.sort(key=lambda x: int(x[\"frames_id\"]))\n return handler_image_list\n\n async def handler_extra():\n handler_extra_list = []\n for ex in os.listdir(\n os.path.join(\n query_path, query, os.path.basename(extra)\n )\n ):\n extra_src = os.path.join(query, \"extra\", ex)\n extra_idx = ex.split(\"(\")[0]\n handler_extra_list.append(\n {\n \"src\": extra_src,\n \"idx\": extra_idx\n }\n )\n handler_extra_list.sort(key=lambda x: int(x[\"idx\"].split(\"(\")[0]))\n return handler_extra_list\n\n image_list, extra_list = await asyncio.gather(\n handler_frame(), handler_extra()\n )\n\n handler_list.append(\n {\n \"query\": query,\n \"stage\": stage,\n \"image_list\": image_list,\n \"extra_list\": extra_list,\n \"proto\": os.path.join(query, os.path.basename(proto))\n }\n )\n return handler_list\n\n async def handler_start():\n single = {}\n if len(range_list) > 0:\n tasks = [handler_inform(result) for result in range_list]\n results = await asyncio.gather(*tasks)\n images_list = [ele for res in results for ele in res]\n\n major_loader = FileSystemLoader(major_loc)\n major_environment = Environment(loader=major_loader)\n major_template = major_environment.get_template(\"template_main.html\")\n\n html = major_template.render(title=title, images_list=images_list)\n report_html = os.path.join(query_path, f\"{title}.html\")\n with open(file=report_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成聚合报告: {os.path.basename(report_html)}\")\n\n cost_list = [cost['stage']['cost'] for cost in images_list]\n href_path = os.path.join(\n os.path.basename(total_path),\n title,\n os.path.basename(report_html)\n )\n single = {\n \"case\": title,\n \"cost_list\": cost_list,\n \"avg\": f\"{sum(map(float, cost_list)) / len(cost_list):.5f}\",\n \"href\": href_path\n }\n logger.debug(\"Recovery: \" + json.dumps(single, ensure_ascii=False))\n else:\n logger.info(\"没有可以聚合的报告 ...\")\n\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {title} ✪✪✪✪✪✪✪✪✪✪\\n\\n\")\n return single\n\n return await handler_start()\n\n @staticmethod\n async def ask_create_total_report(file_name: str, major_loc: str, loader_total_loc: str):\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n try:\n with open(file=os.path.join(file_name, \"Nexa_Recovery\", \"nexaflow.log\"), mode=\"r\", encoding=\"utf-8\") as f:\n open_file = f.read()\n except FileNotFoundError as e:\n return e\n else:\n match_list = re.findall(r\"(?<=Restore: ).*}\", open_file)\n range_list = [json.loads(file.replace(\"'\", '\"')) for file in match_list if file]\n grouped_dict = defaultdict(list)\n for part in range_list:\n parts = part.pop(\"title\"), part.pop(\"total_path\"), part.pop(\"query_path\")\n grouped_dict[parts].append(part)\n\n tasks = [\n Report.ask_create_report(\n major_loc,\n title,\n os.path.join(file_name, os.path.basename(total_path)),\n os.path.join(file_name, os.path.basename(total_path), title),\n range_list\n )\n for (title, total_path, query_path), range_list in grouped_dict.items()\n ]\n merge_result = await asyncio.gather(*tasks)\n total_list = [merge for merge in merge_result]\n\n if len(total_list) > 0:\n total_loader = FileSystemLoader(loader_total_loc)\n total_environment = Environment(loader=total_loader)\n total_template = total_environment.get_template(\"template_information.html\")\n\n html = 
total_template.render(report_time=report_time, total_list=total_list)\n total_html = os.path.join(file_name, \"NexaFlow.html\")\n with open(file=total_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html}\")\n else:\n logger.info(\"没有可以汇总的报告 ...\")\n\n @staticmethod\n def draw(\n classifier_result,\n proto_path: str,\n compress_rate: float = None,\n target_size: Tuple[int, int] = None,\n boost_mode: bool = False,\n framix_template: str = None\n ) -> str:\n\n label_stable: str = \"稳定阶段\"\n label_unstable: str = \"不稳定阶段\"\n label_unspecific: str = \"不明阶段\"\n\n thumbnail_list: List[Dict[str, str]] = list()\n extra_dict: Dict[str, str] = dict()\n\n if not compress_rate:\n compress_rate = 0.2\n\n try:\n stage_range = classifier_result.get_stage_range()\n except AssertionError:\n stage_range = [classifier_result.data]\n\n if boost_mode:\n for cur_index in range(len(stage_range)):\n each = stage_range[cur_index]\n middle = each[len(each) // 2]\n image_list = []\n if middle.is_stable():\n label = label_stable\n image = toolbox.compress_frame(\n middle.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": middle.frame_id,\n \"timestamp\": f\"{middle.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n else:\n if middle.stage == constants.UNKNOWN_STAGE_FLAG:\n label = label_unspecific\n else:\n label = label_unstable\n\n if cur_index + 1 < len(stage_range):\n new_each = [*each, stage_range[cur_index + 1][0]]\n else:\n new_each = each\n\n for i in new_each:\n image = toolbox.compress_frame(\n i.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": i.frame_id,\n \"timestamp\": f\"{i.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n\n first, last = each[0], each[-1]\n title = (f\"{label} \"\n f\"区间: {first.frame_id}({first.timestamp:.5f}) - {last.frame_id}({last.timestamp:.5f}) \"\n f\"耗时: {last.timestamp - first.timestamp:.5f} \"\n f\"分类: {first.stage}\")\n thumbnail_list.append({title: image_list})\n else:\n for cur_index in range(len(stage_range)):\n each_range = stage_range[cur_index]\n middle = each_range[len(each_range) // 2]\n\n if middle.is_stable():\n label = label_stable\n elif middle.stage == constants.UNKNOWN_STAGE_FLAG:\n label = label_unspecific\n else:\n label = label_unstable\n\n if cur_index + 1 < len(stage_range):\n range_for_display = [*each_range, stage_range[cur_index + 1][0]]\n else:\n range_for_display = each_range\n\n image_list = []\n for i in range_for_display:\n image = toolbox.compress_frame(\n i.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": i.frame_id,\n \"timestamp\": f\"{i.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n\n first, last = each_range[0], each_range[-1]\n title = (f\"{label} \"\n f\"区间: {first.frame_id}({first.timestamp:.5f}) - {last.frame_id}({last.timestamp:.5f}) \"\n f\"耗时: {last.timestamp - first.timestamp:.5f} \"\n f\"分类: {first.stage}\")\n thumbnail_list.append({title: image_list})\n\n cost_dict = classifier_result.calc_changing_cost()\n timestamp = toolbox.get_timestamp_str()\n\n extra_dict[\"视频路径\"] = classifier_result.video_path\n extra_dict[\"总计帧数\"] = str(classifier_result.get_length())\n extra_dict[\"每帧间隔\"] = str(classifier_result.get_offset())\n\n def get_template() -> str:\n template_dirs = os.path.join(Constants.NEXA, \"template\")\n template_path = 
os.path.join(template_dirs, \"template_extra.html\")\n with open(template_path, encoding=constants.CHARSET) as t:\n template_file = t.read()\n return template_file\n\n if framix_template:\n template = Template(framix_template)\n else:\n template = Template(get_template())\n\n template_content = template.render(\n thumbnail_list=thumbnail_list,\n extras=extra_dict,\n background_color=constants.BACKGROUND_COLOR,\n cost_dict=cost_dict,\n timestamp=timestamp,\n version_code=\"1.0.0\"\n )\n\n default_name = f\"{timestamp}.html\"\n if os.path.isdir(proto_path):\n report_path = os.path.join(proto_path, default_name)\n else:\n report_path = proto_path\n\n with open(report_path, \"w\", encoding=constants.CHARSET) as fh:\n fh.write(template_content)\n logger.info(f\"生成单次报告: {os.path.basename(report_path)}\")\n\n return report_path" }, { "identifier": "Record", "path": "nexaflow/skills/record.py", "snippet": "class Record(object):\n\n def __init__(self):\n self.__connection: Optional[Popen] = None\n self.__record_event: threading.Event = threading.Event()\n self.__initial: str = \"scrcpy\"\n\n def start_record(self, video_path: str, serial: str = None) -> None:\n cmd = [\n self.__initial, \"--no-audio\", \"--video-bit-rate\", \"8M\", \"--max-fps\", \"60\", \"-Nr\",\n f\"{os.path.join(video_path, 'screen')}.mkv\"\n ]\n if serial:\n cmd.insert(1, \"-s\")\n cmd.insert(2, serial)\n self.__connection = Terminal.cmd_connect(cmd)\n\n def stream(flow: Union[int, IO[str]]) -> None:\n for line in iter(flow.readline, \"\"):\n logger.info(\" \".join(line.strip().split()))\n flow.close()\n\n if self.__connection:\n self.__record_event.set()\n threading.Thread(target=stream, args=(self.__connection.stdout, )).start()\n threading.Thread(target=stream, args=(self.__connection.stderr, )).start()\n time.sleep(1)\n\n def stop_record(self) -> None:\n self.__connection.send_signal(signal.CTRL_C_EVENT)\n self.__record_event.clear()\n self.__connection = None\n\n try:\n Terminal.cmd_oneshot([\"taskkill\", \"/im\", \"scrcpy.exe\"])\n except KeyboardInterrupt:\n logger.info(\"Stop with Ctrl_C_Event ...\")" }, { "identifier": "Player", "path": "nexaflow/skills/player.py", "snippet": "class Player(object):\n\n def __init__(self):\n pygame.mixer.init()\n\n @staticmethod\n def load_all_audio(audio_dirs: str) -> List[Tuple[str, str]]:\n audio_list = []\n for audio_file in os.listdir(audio_dirs):\n if \".mp3\" in audio_file or \".wav\" in audio_file:\n if match := re.search(r\".*?(?=\\.)\", audio_file):\n audio_list.append(\n (match.group(), os.path.join(audio_dirs, audio_file))\n )\n return audio_list\n\n @staticmethod\n def load_audio(audio_dirs: str, audio_name: str) -> Tuple[str, str]:\n query, audio = \"\", \"\"\n for audio_file in os.listdir(audio_dirs):\n if audio_name in audio_file:\n if match := re.search(r\".*?(?=\\.)\", audio_file):\n query = match.group()\n audio = os.path.join(audio_dirs, audio_file)\n return query, audio\n\n @staticmethod\n def play_audio(audio_file: str, volume: float = 1.0):\n if os.path.isfile(audio_file):\n pygame.mixer.music.load(audio_file)\n pygame.mixer.music.set_volume(volume)\n pygame.mixer.music.play()\n logger.info(f\"INFO: Playing audio {audio_file}\")\n while pygame.mixer.music.get_busy():\n pygame.time.Clock().tick(10)\n else:\n logger.error(f\"{audio_file} 不是一个音频文件 ...\")" }, { "identifier": "Switch", "path": "nexaflow/skills/switch.py", "snippet": "class Switch(object):\n\n def __init__(self):\n self.__ffmpeg = \"ffmpeg\"\n self.__ffprobe = \"ffprobe\"\n\n async def 
audio_reform(self, src: str, dst: str) -> None:\n \"\"\"\n 调整mp3编码格式为标准mp3\n :param src: 原音频路径\n :param dst: 新音频路径\n \"\"\"\n cmd = [self.__ffmpeg, \"-i\", src, \"-ar\", \"44100\", \"-b:a\", \"128k\", dst]\n await Terminal.cmd_line(*cmd)\n\n async def video_reform(self, src: str, dst: str) -> None:\n \"\"\"\n 转换视频格式\n :param src: 原始视频路径\n :param dst: 新视频路径\n \"\"\"\n cmd = [self.__ffmpeg, \"-i\", src, \"-r\", \"60\", dst]\n await Terminal.cmd_line(*cmd)\n\n async def video_change(self, src: str, dst: str) -> None:\n \"\"\"\n 调整视频\n :param src: 原视频路径\n :param dst: 新视频路径\n \"\"\"\n cmd = [\n self.__ffmpeg, \"-i\", src, \"-vf\", \"fps=60\", \"-c:v\",\n \"libx264\", \"-crf\", \"18\", \"-c:a\", \"copy\", dst\n ]\n await Terminal.cmd_line(*cmd)\n\n async def video_tailor(self, src: str, dst: str, start: str = \"00:00:00\", end: str = \"00:00:05\") -> None:\n \"\"\"\n 截取视频\n :param src: 原视频路径\n :param dst: 新视频路径\n :param start: 开始\n :param end: 结束\n \"\"\"\n before = os.path.basename(src).split(\".\")[0]\n after = os.path.basename(src).split(\".\")[-1]\n target = os.path.join(\n dst,\n f\"{before}_{time.strftime('%Y%m%d%H%M%S')}_{random.randint(100, 999)}.{after}\"\n )\n cmd = [self.__ffmpeg, \"-i\", src, \"-ss\", start, \"-t\", end, \"-c\", \"copy\", target]\n await Terminal.cmd_line(*cmd)\n\n async def video_cutter(self, src: str, dst: str, start: str = \"00:00:00\", end: str = \"00:00:05\") -> None:\n \"\"\"\n 流式截取视频\n :param src: 原视频路径\n :param dst: 新视频路径\n :param start: 开始\n :param end: 结束\n \"\"\"\n before = os.path.basename(src).split(\".\")[0]\n after = os.path.basename(src).split(\".\")[-1]\n target = os.path.join(\n dst,\n f\"{before}_{time.strftime('%Y%m%d%H%M%S')}_{random.randint(100, 999)}.{after}\"\n )\n cmd = [\n self.__ffmpeg, \"-i\", src, \"-ss\", start, \"-t\", end, \"-vf\", \"fps=60\",\n \"-c:v\", \"libx264\", \"-crf\", \"18\", \"-c:a\", \"copy\", target\n ]\n await Terminal.cmd_line(*cmd)\n\n async def video_length(self, src: str) -> float:\n \"\"\"\n 查看视频的时间长度\n :param src: 原视频路径\n :return: 视频时间长度\n \"\"\"\n cmd = [\n self.__ffprobe, \"-v\", \"error\", \"-show_entries\", \"format=duration\",\n \"-of\", \"default=noprint_wrappers=1:nokey=1\", \"-i\", src\n ]\n result = await Terminal.cmd_line(*cmd)\n return float(result.strip())" }, { "identifier": "VideoCutter", "path": "nexaflow/cutter/cutter.py", "snippet": "class VideoCutter(object):\n\n def __init__(\n self,\n step: int = None,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n ):\n\n self.step = step or 1\n\n if (not compress_rate) and (not target_size):\n # logger.debug(\n # f\"no compress rate or target size received. 
set compress rate to 0.2\"\n # )\n compress_rate = 0.2\n\n self._hook_list: typing.List[BaseHook] = list()\n compress_hook = CompressHook(\n overwrite=True, compress_rate=compress_rate, target_size=target_size\n )\n grey_hook = GreyHook(overwrite=True)\n self.add_hook(compress_hook)\n self.add_hook(grey_hook)\n\n def add_hook(self, new_hook: BaseHook):\n self._hook_list.append(new_hook)\n # logger.debug(f\"add hook: {new_hook.__class__.__name__}\")\n\n @staticmethod\n def pic_split(origin: np.ndarray, block: int) -> typing.List[np.ndarray]:\n result: typing.List[np.ndarray] = list()\n for each_block in np.array_split(origin, block, axis=0):\n sub_block = np.array_split(each_block, block, axis=1)\n result += sub_block\n return result\n\n def _apply_hook(self, frame: VideoFrame, *args, **kwargs) -> VideoFrame:\n for each_hook in self._hook_list:\n frame = each_hook.do(frame, *args, **kwargs)\n return frame\n\n @staticmethod\n def compare_frame_list(\n src: typing.List[np.ndarray], target: typing.List[np.ndarray]\n ) -> typing.List[float]:\n\n ssim = 1.0\n mse = 0.0\n psnr = 0.0\n\n for part_index, (each_start, each_end) in enumerate(zip(src, target)):\n part_ssim = toolbox.compare_ssim(each_start, each_end)\n if part_ssim < ssim:\n ssim = part_ssim\n\n part_mse = toolbox.calc_mse(each_start, each_end)\n if part_mse > mse:\n mse = part_mse\n\n part_psnr = toolbox.calc_psnr(each_start, each_end)\n if part_psnr > psnr:\n psnr = part_psnr\n # logger.debug(\n # f\"part {part_index}: ssim={part_ssim}; mse={part_mse}; psnr={part_psnr}\"\n # )\n return [ssim, mse, psnr]\n\n @staticmethod\n def split_into_parts(value: int, parts: int) -> List[Tuple[int, int, int]]:\n division, remainder = value // parts, value % parts\n result, current_start = [], 1\n\n for i in range(parts):\n current_end = current_start + division - 1\n if i == parts - 1: # 处理最后一部分,加上余数\n current_end += remainder\n result.append((current_start, current_end, current_end - current_start))\n\n if i < parts - 1: # 不是最后一部分时,添加断开部分\n gap_start = current_end\n gap_end = current_end + 1\n result.append((gap_start, gap_end, gap_end - gap_start))\n current_start = current_end + 1\n\n return result\n\n def handler_frames(self, window: Window) -> typing.List[VideoCutRange]:\n range_list_part = []\n\n def technique():\n frame_list = window.load_data()\n frame_list = [self._apply_hook(each) for each in frame_list]\n\n ssim_list, mse_list, psnr_list = [], [], []\n\n cur_frame = frame_list[0]\n first_target_frame = frame_list[1]\n cur_frame_list = self.pic_split(cur_frame.data, window.block)\n for each in frame_list[1:]:\n each_frame_list = self.pic_split(each.data, window.block)\n ssim, mse, psnr = self.compare_frame_list(\n cur_frame_list, each_frame_list\n )\n ssim_list.append(ssim)\n mse_list.append(mse)\n psnr_list.append(psnr)\n\n ssim = window.float_merge(ssim_list)\n mse = window.float_merge(mse_list)\n psnr = window.float_merge(psnr_list)\n\n range_list_part.append(\n VideoCutRange(\n window.video,\n start=cur_frame.frame_id, end=first_target_frame.frame_id,\n ssim=[ssim], mse=[mse], psnr=[psnr],\n start_time=cur_frame.timestamp, end_time=first_target_frame.timestamp,\n )\n )\n\n pbar = toolbox.show_progress(window.frame_total, 174, \"Cutter\")\n while True:\n technique()\n pbar.update(1)\n\n continue_flag = window.shift()\n if not continue_flag:\n pbar.close()\n break\n\n return range_list_part\n\n def _convert_video_into_range_list(\n self, video: VideoObject, block: int, window_size: int, window_coefficient: int\n ) -> 
typing.List[VideoCutRange]:\n\n step = self.step\n video_length = video.frame_count\n range_list: typing.List[VideoCutRange] = list()\n logger.info(f\"总帧数: {video_length} 片段数: {video_length - 1} 分辨率: {video.frame_size}\")\n\n window_list: List[\"Window\"] = []\n for index, parts in enumerate(self.split_into_parts(video_length, 2)):\n start, end, size = parts\n logger.info(f\"帧片段: {index + 1:02} Start: {start:03} End: {end:03} Length: {size:03}\")\n window = Window(video, step, block, window_size, window_coefficient, start, end, size)\n window_list.append(window)\n\n with ThreadPoolExecutor() as executor:\n futures = [executor.submit(self.handler_frames, w) for w in window_list]\n for future in futures:\n range_list.extend(future.result())\n\n return range_list\n\n def cut(\n self,\n video: typing.Union[str, VideoObject],\n block: int = None,\n window_size: int = None,\n window_coefficient: int = None,\n *_,\n **kwargs,\n ) -> VideoCutResult:\n\n if not block:\n block = 3\n if not window_size:\n window_size = 1\n if not window_coefficient:\n window_coefficient = 2\n\n start_time = time.time()\n if isinstance(video, str):\n video = VideoObject(video)\n\n logger.info(f\"开始压缩视频: {os.path.basename(video.path)}\")\n range_list = self._convert_video_into_range_list(\n video, block, window_size, window_coefficient\n )\n logger.info(f\"视频压缩完成: {os.path.basename(video.path)}\")\n logger.info(f\"视频压缩耗时: {(time.time() - start_time):.2f}秒\")\n\n return VideoCutResult(video, range_list, cut_kwargs=kwargs)" }, { "identifier": "VideoObject", "path": "nexaflow/video.py", "snippet": "class VideoObject(object):\n\n def __init__(\n self,\n path: typing.Union[str, os.PathLike],\n fps: int = None,\n ):\n \"\"\"\n 初始化,检查文件路径是否有效,执行其他一些初始化操作\n \"\"\"\n assert os.path.isfile(path), f\"video {path} not existed\"\n self.path: str = str(path)\n self.grey_data: typing.Optional[typing.Tuple[\"VideoFrame\"]] = tuple() # 灰度帧\n self.hued_data: typing.Optional[typing.Tuple[\"ColorFrame\"]] = tuple() # 彩色帧\n\n if fps:\n video_path = os.path.join(tempfile.mkdtemp(), f\"tmp_{fps}.mp4\")\n logger.debug(f\"convert video, and bind path to {video_path}\")\n logger.info(f\"转换视频: {video_path}\")\n toolbox.fps_convert(\n fps, self.path, video_path, imageio_ffmpeg.get_ffmpeg_exe()\n )\n self.path = video_path\n\n with toolbox.video_capture(self.path) as cap:\n self.frame_count = toolbox.get_frame_count(cap)\n self.frame_size = toolbox.get_frame_size(cap)\n\n logger.info(f\"视频已生成,视频帧长度: {self.frame_count} 分辨率: {self.frame_size}\")\n\n def __str__(self):\n return f\"<VideoObject path={self.path}>\"\n\n __repr__ = __str__\n\n def sync_timestamp(self, frame_data: tuple[VideoFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n vid_count = vid.reader.nframes\n pbar = toolbox.show_progress(vid_count, 153, \"Synzer\")\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n pbar.update(1)\n pbar.close()\n\n def sync_backstage(self, frame_data: tuple[ColorFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not 
frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n\n def clean_frames(self):\n \"\"\"\n 清除所有帧数据\n \"\"\"\n self.grey_data = tuple()\n self.hued_data = tuple()\n\n @staticmethod\n def frame_details(frame_type):\n each_cost = frame_type[0].data.nbytes / (1024 ** 2)\n total_cost = each_cost * len(frame_type)\n frame_size = frame_type[0].data.shape[::-1]\n return f\"{frame_type[0].__class__.__name__}: [{each_cost:.2f} MB] [{total_cost:.2f} MB] {frame_size}\"\n\n def load_frames(self, color: bool = False):\n \"\"\"\n 从文件中加载所有帧到内存\n \"\"\"\n logger.info(f\"加载视频帧到内存: {os.path.basename(self.path)}\")\n\n def load_stream(frames: type[VideoFrame]):\n pbar = toolbox.show_progress(self.frame_count, 180, \"Loader\")\n data: list[VideoFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n pbar.update(1)\n pbar.close()\n return data\n\n def back_ground(frames: type[ColorFrame]):\n data: list[ColorFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n return data\n\n def load_stream_sync(brand):\n self.sync_timestamp(tuple(frame_data := load_stream(brand)))\n return frame_data\n\n def back_ground_sync(brand):\n self.sync_backstage(tuple(frame_data := back_ground(brand)))\n return frame_data\n\n start_time, task, hued = time.time(), None, None\n if color:\n task = ThreadPoolExecutor()\n hued = task.submit(back_ground_sync, ColorFrame)\n\n grey = load_stream_sync(VideoFrame)\n self.grey_data = tuple(grey)\n logger.info(f\"灰度帧已加载: {self.frame_details(self.grey_data)}\")\n logger.info(f\"视频加载耗时: {time.time() - start_time:.2f} 秒\")\n return task, hued\n\n def _read_from_file(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从文件中读取帧\n \"\"\"\n with toolbox.video_capture(self.path) as cap:\n success, frame = cap.read()\n while success:\n yield VideoFrame.initial(cap, frame)\n success, frame = cap.read()\n\n def _read_from_mem(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从内存中读取帧\n \"\"\"\n for each_frame in self.grey_data:\n yield each_frame\n\n def _read(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 选择从文件还是从内存中读取帧\n \"\"\"\n if self.grey_data:\n yield from self._read_from_mem()\n else:\n yield from self._read_from_file()\n\n def get_iterator(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 获取帧的迭代器\n \"\"\"\n return self._read()\n\n def get_operator(self) -> _BaseFrameOperator:\n \"\"\"\n 根据是否已经加载帧,返回相应的FrameOperator(`MemFrameOperator`或`FileFrameOperator`)\n \"\"\"\n if self.grey_data:\n return MemFrameOperator(self)\n return FileFrameOperator(self)\n\n def __iter__(self):\n \"\"\"\n 返回一个用于迭代帧的迭代器\n \"\"\"\n return self.get_iterator()" }, { "identifier": "Frame", "path": "nexaflow/video.py", "snippet": "class Frame(object):\n\n def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):\n self.frame_id: int = frame_id\n self.timestamp: float = timestamp\n self.data: np.ndarray = data\n\n @staticmethod\n def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> \"Frame\":\n raise NotImplementedError\n\n def copy(self) -> \"Frame\":\n raise NotImplementedError" }, { "identifier": "KerasClassifier", "path": "nexaflow/classifier/keras_classifier.py", "snippet": "class 
KerasClassifier(BaseModelClassifier):\n\n UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG\n MODEL_DENSE = 6\n\n def __init__(\n self,\n score_threshold: float = None,\n data_size: typing.Sequence[int] = None,\n nb_train_samples: int = None,\n nb_validation_samples: int = None,\n epochs: int = None,\n batch_size: int = None,\n *_,\n **__,\n ):\n super(KerasClassifier, self).__init__(*_, **__)\n\n # 模型\n self._model: typing.Optional[keras.Sequential] = None\n # 配置\n self.score_threshold: float = score_threshold or 0.0\n self.data_size: typing.Sequence[int] = data_size or (200, 200)\n self.nb_train_samples: int = nb_train_samples or 64\n self.nb_validation_samples: int = nb_validation_samples or 64\n self.epochs: int = epochs or 20\n self.batch_size: int = batch_size or 4\n\n # logger.debug(f\"score threshold: {self.score_threshold}\")\n # logger.debug(f\"data size: {self.data_size}\")\n # logger.debug(f\"nb train samples: {self.nb_train_samples}\")\n # logger.debug(f\"nb validation samples: {self.nb_validation_samples}\")\n # logger.debug(f\"epochs: {self.epochs}\")\n # logger.debug(f\"batch size: {self.batch_size}\")\n\n @property\n def follow_keras_size(self):\n return self.data_size[1], self.data_size[0]\n\n @property\n def follow_cv_size(self):\n return self.data_size[0], self.data_size[1]\n\n def clean_model(self):\n self._model = None\n\n def save_model(self, model_path: str, overwrite: bool = None):\n logger.debug(f\"save model to {model_path}\")\n # assert model file\n if os.path.isfile(model_path) and not overwrite:\n raise FileExistsError(\n f\"model file {model_path} already existed, you can set `overwrite` True to cover it\"\n )\n # assert model data is not empty\n assert self._model, \"model is empty\"\n print(self._model.summary())\n self._model.save_weights(model_path)\n\n def load_model(self, model_path: str, overwrite: bool = None):\n # logger.debug(f\"load model from {model_path}\")\n logger.info(f\"加载Keras神经网络引擎 ...\")\n # assert model file\n assert os.path.isfile(model_path), f\"model file {model_path} not existed\"\n # assert model data is empty\n if self._model and not overwrite:\n raise RuntimeError(\n f\"model is not empty, you can set `overwrite` True to cover it\"\n )\n self._model = self.create_model()\n self._model.load_weights(model_path)\n\n def create_model(self) -> keras.Sequential:\n # logger.info(f\"creating Keras sequential model\")\n logger.info(\"Keras神经网络引擎创建图像分析模型 ...\")\n if keras.backend.image_data_format() == \"channels_first\":\n input_shape = (1, *self.follow_keras_size)\n else:\n input_shape = (*self.follow_keras_size, 1)\n\n model = keras.Sequential()\n\n model.add(keras.layers.Conv2D(32, (3, 3), padding=\"same\", input_shape=input_shape))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Conv2D(64, (3, 3), padding=\"same\"))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Conv2D(128, (3, 3), padding=\"same\"))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(256, activation=\"relu\"))\n model.add(keras.layers.Dropout(0.5))\n model.add(keras.layers.Dense(self.MODEL_DENSE, activation=\"softmax\"))\n\n model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # logger.info(\"Keras model created\")\n logger.info(\"Keras神经网络引擎加载完成,开始分析图像 
...\")\n return model\n\n def train(self, data_path: str = None, *_, **__):\n\n def _data_verify(p: str):\n p = pathlib.Path(p)\n assert p.is_dir(), f\"{p} is not a valid directory\"\n\n number_of_dir = len([each for each in os.listdir(p) if (p / each).is_dir()])\n assert (\n number_of_dir > 1\n ), f\"dataset only contains one class. maybe some path errors happened: {p}?\"\n\n assert number_of_dir <= self.MODEL_DENSE, (\n f\"dataset has {number_of_dir} classes (more than \" + str(self.MODEL_DENSE) + \")\"\n )\n\n _data_verify(data_path)\n\n if not self._model:\n self._model = self.create_model()\n\n datagen = keras.preprocessing.image.ImageDataGenerator(\n rescale=1.0 / 16,\n shear_range=0.2,\n zoom_range=0.2,\n validation_split=0.33,\n horizontal_flip=True # 水平翻转增强\n )\n\n train_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.follow_keras_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"training\",\n )\n\n validation_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.follow_keras_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"validation\",\n )\n\n self._model.fit(\n train_generator,\n epochs=self.epochs,\n validation_data=validation_generator,\n )\n\n logger.debug(\"train finished\")\n\n def predict(self, pic_path: str, *args, **kwargs) -> str:\n pic_object = toolbox.imread(pic_path)\n # fake VideoFrame for apply_hook\n fake_frame = VideoFrame(0, 0.0, pic_object)\n fake_frame = self._apply_hook(fake_frame, *args, **kwargs)\n return self.predict_with_object(fake_frame.data)\n\n def predict_with_object(self, frame: np.ndarray) -> str:\n # resize for model\n frame = cv2.resize(frame, dsize=self.follow_cv_size)\n frame = np.expand_dims(frame, axis=[0, -1])\n # verbose = 0, 静默Keras分类显示\n result = self._model.predict(frame, verbose=0)\n tag = str(np.argmax(result, axis=1)[0])\n confidence = result.max()\n # logger.debug(f\"confidence: {confidence}\")\n if confidence < self.score_threshold:\n logger.warning(\n f\"max score is lower than {self.score_threshold}, unknown class\"\n )\n return self.UNKNOWN_STAGE_NAME\n return tag\n\n def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:\n return self.predict_with_object(frame.data)" }, { "identifier": "BaseHook", "path": "nexaflow/hook.py", "snippet": "class BaseHook(object):\n\n def __init__(self, *_, **__):\n # logger.debug(f\"start initialing: {self.__class__.__name__} ...\")\n logger.info(f\"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...\")\n self.result = dict()\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n # info = f\"execute hook: {self.__class__.__name__}\"\n\n frame_id = frame.frame_id\n if frame_id != -1:\n # logger.debug(f\"{info}, frame id: {frame_id}\")\n pass\n return frame" }, { "identifier": "CropHook", "path": "nexaflow/hook.py", "snippet": "class CropHook(_AreaBaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n height_range, width_range = self.convert_size_and_offset(*frame.data.shape)\n frame.data[: height_range[0], :] = 0\n frame.data[height_range[1]:, :] = 0\n frame.data[:, : width_range[0]] = 0\n frame.data[:, width_range[1]:] = 0\n return frame" }, { "identifier": "OmitHook", "path": "nexaflow/hook.py", "snippet": "class OmitHook(_AreaBaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n 
height_range, width_range = self.convert_size_and_offset(*frame.data.shape)\n frame.data[\n height_range[0]: height_range[1], width_range[0]: width_range[1]\n ] = 0\n return frame" }, { "identifier": "FrameSaveHook", "path": "nexaflow/hook.py", "snippet": "class FrameSaveHook(BaseHook):\n\n def __init__(self, target_dir: str, *_, **__):\n super().__init__(*_, **__)\n\n self.target_dir = target_dir\n os.makedirs(target_dir, exist_ok=True)\n # logger.debug(f\"target dir: {target_dir}\")\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n safe_timestamp = str(frame.timestamp).replace(\".\", \"_\")\n frame_name = f\"{frame.frame_id}({safe_timestamp}).png\"\n target_path = os.path.join(self.target_dir, frame_name)\n\n # 不能保存中文路径\n # cv2.imwrite(target_path, frame.data)\n # logger.debug(f\"frame saved to {target_path}\")\n\n # 保存中文路径\n cv2.imencode(\".png\", frame.data)[1].tofile(target_path)\n\n return frame" }, { "identifier": "ClassifierResult", "path": "nexaflow/classifier/base.py", "snippet": "class ClassifierResult(object):\n\n LABEL_DATA: str = \"data\"\n LABEL_VIDEO_PATH: str = \"video_path\"\n\n def __init__(self, data: typing.List[SingleClassifierResult]):\n self.video_path: str = data[0].video_path\n self.data: typing.List[SingleClassifierResult] = data\n\n def get_timestamp_list(self) -> typing.List[float]:\n return [each.timestamp for each in self.data]\n\n def get_stage_list(self) -> typing.List[str]:\n return [each.stage for each in self.data]\n\n def get_length(self) -> int:\n return len(self.data)\n\n def get_offset(self) -> float:\n return self.data[1].timestamp - self.data[0].timestamp\n\n def get_ordered_stage_set(self) -> typing.List[str]:\n ret = list()\n for each in self.get_stage_list():\n if not ret:\n ret.append(each)\n continue\n if each == ret[-1]:\n continue\n ret.append(each)\n return ret\n\n def get_stage_set(self) -> typing.Set[str]:\n return set(self.get_stage_list())\n\n def to_dict(\n self,\n ) -> typing.Dict[str, typing.List[typing.List[SingleClassifierResult]]]:\n stage_list = list(self.get_stage_set())\n try:\n int(stage_list[0])\n except ValueError:\n stage_list.sort()\n else:\n stage_list.sort(key=lambda o: int(o))\n\n d = OrderedDict()\n for each_stage in stage_list:\n d[each_stage] = self.get_specific_stage_range(each_stage)\n return d\n\n def contain(self, stage_name: str) -> bool:\n return stage_name in self.get_stage_set()\n\n def first(self, stage_name: str) -> SingleClassifierResult:\n for each in self.data:\n if each.stage == stage_name:\n # logger.debug(f\"first frame of {stage_name}: {each}\")\n return each\n logger.warning(f\"no stage named {stage_name} found\")\n\n def last(self, stage_name: str) -> SingleClassifierResult:\n for each in self.data[::-1]:\n if each.stage == stage_name:\n # logger.debug(f\"last frame of {stage_name}: {each}\")\n return each\n logger.warning(f\"no stage named {stage_name} found\")\n\n def get_stage_range(self) -> typing.List[typing.List[SingleClassifierResult]]:\n result: typing.List[typing.List[SingleClassifierResult]] = []\n\n cur = self.data[0]\n cur_index = cur.frame_id - 1\n ptr = cur_index\n length = self.get_length()\n while ptr < length:\n next_one = self.data[ptr]\n if cur.stage == next_one.stage:\n ptr += 1\n continue\n\n result.append(self.data[cur_index: ptr + 1 - 1] or [self.data[cur_index]])\n cur = next_one\n cur_index = next_one.frame_id - 1\n\n assert len(result) > 0, \"video seems to only contain one stage\"\n\n last_data = 
self.data[-1]\n last_result = result[-1][-1]\n if last_result != last_data:\n result.append(\n self.data[last_result.frame_id - 1 + 1: last_data.frame_id - 1 + 1]\n or [self.data[last_result.frame_id - 1]]\n )\n # logger.debug(f\"get stage range: {result}\")\n return result\n\n def get_specific_stage_range(\n self, stage_name: str\n ) -> typing.List[typing.List[SingleClassifierResult]]:\n ret = list()\n for each_range in self.get_stage_range():\n cur = each_range[0]\n if cur.stage == stage_name:\n ret.append(each_range)\n return ret\n\n def get_not_stable_stage_range(\n self,\n ) -> typing.List[typing.List[SingleClassifierResult]]:\n unstable = self.get_specific_stage_range(constants.UNSTABLE_FLAG)\n ignore = self.get_specific_stage_range(constants.IGNORE_FLAG)\n return sorted(unstable + ignore, key=lambda x: x[0].stage)\n\n def mark_range(self, start: int, end: int, target_stage: str):\n for each in self.data[start:end]:\n each.stage = target_stage\n # logger.debug(f\"range {start} to {end} has been marked as {target_stage}\")\n\n def mark_range_unstable(self, start: int, end: int):\n self.mark_range(start, end, constants.UNSTABLE_FLAG)\n\n def mark_range_ignore(self, start: int, end: int):\n self.mark_range(start, end, constants.IGNORE_FLAG)\n\n def time_cost_between(self, start_stage: str, end_stage: str) -> float:\n return self.first(end_stage).timestamp - self.last(start_stage).timestamp\n\n def get_important_frame_list(self) -> typing.List[SingleClassifierResult]:\n result = [self.data[0]]\n\n prev = self.data[0]\n for cur in self.data[1:]:\n if cur.stage != prev.stage:\n result.append(prev)\n result.append(cur)\n prev = cur\n\n if result[-1] != self.data[-1]:\n result.append(self.data[-1])\n return result\n\n def calc_changing_cost(\n self,\n ) -> typing.Dict[str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]]:\n\n cost_dict: typing.Dict[\n str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]\n ] = {}\n i = 0\n while i < len(self.data) - 1:\n cur = self.data[i]\n next_one = self.data[i + 1]\n\n if not next_one.is_stable():\n for j in range(i + 1, len(self.data)):\n i = j\n next_one = self.data[j]\n if next_one.is_stable():\n break\n\n changing_name = f\"from {cur.stage} to {next_one.stage}\"\n cost_dict[changing_name] = (cur, next_one)\n else:\n i += 1\n return cost_dict\n\n def dumps(self) -> str:\n\n def _handler(obj: object):\n if isinstance(obj, np.ndarray):\n return \"<np.ndarray object>\"\n return obj.__dict__\n\n return json.dumps(self, sort_keys=True, default=_handler)\n\n def dump(self, json_path: str, **kwargs):\n logger.debug(f\"dump result to {json_path}\")\n assert not os.path.isfile(json_path), f\"{json_path} already existed\"\n with open(json_path, \"w+\", **kwargs) as f:\n f.write(self.dumps())\n\n @classmethod\n def load(cls, from_file: str) -> \"ClassifierResult\":\n assert os.path.isfile(from_file), f\"file {from_file} not existed\"\n with open(from_file, encoding=constants.CHARSET) as f:\n content = json.load(f)\n\n data = content[cls.LABEL_DATA]\n return ClassifierResult([SingleClassifierResult(**each) for each in data])\n\n def diff(self, another: \"ClassifierResult\") -> DiffResult:\n return DiffResult(self, another)\n\n def is_order_correct(self, should_be: typing.List[str]) -> bool:\n cur = self.get_ordered_stage_set()\n len_cur, len_should_be = len(cur), len(should_be)\n if len_cur == len_should_be:\n return cur == should_be\n if len_cur < len_should_be:\n return False\n\n ptr_should, ptr_cur = 0, 0\n while ptr_cur < len_cur:\n 
if cur[ptr_cur] == should_be[ptr_should]:\n ptr_should += 1\n ptr_cur += 1\n if ptr_should == len_should_be:\n return True\n return False\n\n get_frame_length = get_offset" }, { "identifier": "SingleClassifierResult", "path": "nexaflow/classifier/base.py", "snippet": "class SingleClassifierResult(object):\n\n def __init__(\n self,\n video_path: str,\n frame_id: int,\n timestamp: float,\n stage: str,\n data: np.ndarray = None,\n ):\n self.video_path: str = video_path\n self.frame_id: int = frame_id\n self.timestamp: float = timestamp\n self.stage: str = stage\n self.data: np.ndarray = data\n\n def to_video_frame(self, *args, **kwargs) -> VideoFrame:\n if self.data is not None:\n return VideoFrame(self.frame_id, self.timestamp, self.data)\n\n with toolbox.video_capture(self.video_path) as cap:\n frame = toolbox.get_frame(cap, self.frame_id)\n compressed = toolbox.compress_frame(frame, *args, **kwargs)\n return VideoFrame(self.frame_id, self.timestamp, compressed)\n\n def get_data(self) -> np.ndarray:\n return self.to_video_frame().data\n\n def is_stable(self) -> bool:\n return self.stage not in (\n constants.UNSTABLE_FLAG,\n constants.IGNORE_FLAG,\n constants.UNKNOWN_STAGE_FLAG,\n )\n\n def contain_image(\n self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n return self.to_video_frame().contain_image(\n image_path=image_path, image_object=image_object, **kwargs\n )\n\n def to_dict(self) -> typing.Dict:\n return self.__dict__\n\n def __str__(self):\n return f\"<ClassifierResult stage={self.stage} frame_id={self.frame_id} timestamp={self.timestamp}>\"\n\n __repr__ = __str__" } ]
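Note: FrameSaveHook above works around the fact that cv2.imwrite cannot reliably write to paths containing non-ASCII (e.g. Chinese) characters, which is what its two Chinese comments point out: it encodes the PNG in memory and writes the raw bytes instead. A minimal standalone sketch of that trick, with the matching read path added for symmetry; the function names are illustrative and not part of nexaflow:

import cv2
import numpy as np


def save_png_unicode(image: np.ndarray, target_path: str) -> None:
    # cv2.imwrite can fail on non-ASCII paths (notably on Windows), so
    # encode the PNG in memory and let numpy write the bytes itself.
    ok, buffer = cv2.imencode(".png", image)
    if not ok:
        raise IOError(f"failed to encode frame for {target_path}")
    buffer.tofile(target_path)


def load_png_unicode(source_path: str) -> np.ndarray:
    # Read back the same way: raw bytes first, then decode.
    raw = np.fromfile(source_path, dtype=np.uint8)
    return cv2.imdecode(raw, cv2.IMREAD_COLOR)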
import os import cv2 import time import random import asyncio from loguru import logger from typing import List, Union, Optional from concurrent.futures import ThreadPoolExecutor from nexaflow import toolbox from nexaflow.skills.report import Report from nexaflow.skills.record import Record from nexaflow.skills.player import Player from nexaflow.skills.switch import Switch from nexaflow.cutter.cutter import VideoCutter from nexaflow.video import VideoObject, Frame from nexaflow.classifier.keras_classifier import KerasClassifier from nexaflow.hook import BaseHook, CropHook, OmitHook, FrameSaveHook from nexaflow.classifier.base import ClassifierResult, SingleClassifierResult
16,763
class Alynex(object):

    target_size: tuple = (350, 700)
    fps: int = 60
    step: int = 1
    block: int = 6
    threshold: Union[int, float] = 0.97
    offset: int = 3
    compress_rate: float = 0.5
    window_size: int = 1
    window_coefficient: int = 2

    kc: KerasClassifier = KerasClassifier(
        target_size=target_size, data_size=target_size
    )

    def __init__(self):
        self.__report: Optional[Report] = None
        self.__record: Optional[Record] = Record()
        self.__player: Optional[Player] = Player()
        self.__ffmpeg: Optional[Switch] = Switch()
        self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer()
        self.__framix: Optional[Alynex._Framix] = None

    def __str__(self):
        return (f"""
        <Alynex for NexaFlow
        Target Size: {self.target_size}
        Fps: {self.fps}
        Step: {self.step}
        Block: {self.block}
        Threshold: {self.threshold}
        Offset: {self.offset}
        Compress Rate: {self.compress_rate}
        Window Size: {self.window_size}
        Window Coefficient: {self.window_coefficient}
        >
        """)

    __repr__ = __str__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    @property
    def report(self) -> "Report":
        assert self.__report, f"{self.activate.__name__} first ..."
        return self.__report

    @property
    def record(self) -> "Record":
        return self.__record

    @property
    def player(self) -> "Player":
        return self.__player

    @property
    def ffmpeg(self) -> "Switch":
        return self.__ffmpeg

    @property
    def filmer(self) -> "Alynex._Filmer":
        return self.__filmer

    @property
    def framix(self) -> "Alynex._Framix":
        assert self.__framix, f"{self.activate.__name__} first ..."
        return self.__framix

    @staticmethod
    def only_video(folder: str) -> List:

        class Entry(object):

            def __init__(self, title: str, place: str, sheet: list):
                self.title = title
                self.place = place
                self.sheet = sheet

        return [
            Entry(
                os.path.basename(root), root,
                [os.path.join(root, f) for f in sorted(file)]
            )
            for root, _, file in os.walk(folder) if file
        ]

    def activate(self, models: str, total_path: str):
        if not self.__report:
            self.__report = Report(total_path)
            self.__framix = Alynex._Framix(self.report)
            Alynex.kc.load_model(models)

    class _Filmer(object):

        @staticmethod
        def train_model(video_file: str) -> None:
            model_path = os.path.join(
                os.path.dirname(video_file),
                f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}"
            )
            if not os.path.exists(model_path):
                os.makedirs(model_path, exist_ok=True)

            # Split the video into frames
class Alynex(object):

    target_size: tuple = (350, 700)
    fps: int = 60
    step: int = 1
    block: int = 6
    threshold: Union[int, float] = 0.97
    offset: int = 3
    compress_rate: float = 0.5
    window_size: int = 1
    window_coefficient: int = 2

    kc: KerasClassifier = KerasClassifier(
        target_size=target_size, data_size=target_size
    )

    def __init__(self):
        self.__report: Optional[Report] = None
        self.__record: Optional[Record] = Record()
        self.__player: Optional[Player] = Player()
        self.__ffmpeg: Optional[Switch] = Switch()
        self.__filmer: Optional[Alynex._Filmer] = Alynex._Filmer()
        self.__framix: Optional[Alynex._Framix] = None

    def __str__(self):
        return (f"""
        <Alynex for NexaFlow
        Target Size: {self.target_size}
        Fps: {self.fps}
        Step: {self.step}
        Block: {self.block}
        Threshold: {self.threshold}
        Offset: {self.offset}
        Compress Rate: {self.compress_rate}
        Window Size: {self.window_size}
        Window Coefficient: {self.window_coefficient}
        >
        """)

    __repr__ = __str__

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    @property
    def report(self) -> "Report":
        assert self.__report, f"{self.activate.__name__} first ..."
        return self.__report

    @property
    def record(self) -> "Record":
        return self.__record

    @property
    def player(self) -> "Player":
        return self.__player

    @property
    def ffmpeg(self) -> "Switch":
        return self.__ffmpeg

    @property
    def filmer(self) -> "Alynex._Filmer":
        return self.__filmer

    @property
    def framix(self) -> "Alynex._Framix":
        assert self.__framix, f"{self.activate.__name__} first ..."
        return self.__framix

    @staticmethod
    def only_video(folder: str) -> List:

        class Entry(object):

            def __init__(self, title: str, place: str, sheet: list):
                self.title = title
                self.place = place
                self.sheet = sheet

        return [
            Entry(
                os.path.basename(root), root,
                [os.path.join(root, f) for f in sorted(file)]
            )
            for root, _, file in os.walk(folder) if file
        ]

    def activate(self, models: str, total_path: str):
        if not self.__report:
            self.__report = Report(total_path)
            self.__framix = Alynex._Framix(self.report)
            Alynex.kc.load_model(models)

    class _Filmer(object):

        @staticmethod
        def train_model(video_file: str) -> None:
            model_path = os.path.join(
                os.path.dirname(video_file),
                f"Model_{time.strftime('%Y%m%d%H%M%S')}_{os.getpid()}"
            )
            if not os.path.exists(model_path):
                os.makedirs(model_path, exist_ok=True)

            # Split the video into frames
video = VideoObject(video_file, fps=Alynex.fps)
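Note: the completion target above constructs the VideoObject that feeds the cutter and classifier. Upstream of it, ClassifierResult.get_stage_range splits the per-frame labels into runs of identical stages with explicit pointer arithmetic. A pure-Python sketch of that run-grouping idea, assuming 0-based list positions; it is not the library's implementation, which also tracks frame_id offsets and appends the trailing range separately:

import itertools
from typing import List, Tuple


def group_stage_ranges(stages: List[str]) -> List[Tuple[str, int, int]]:
    # Collapse a per-frame stage sequence into (stage, start, end) runs:
    # consecutive frames sharing a stage form one range.
    ranges = []
    index = 0
    for stage, run in itertools.groupby(stages):
        length = sum(1 for _ in run)
        ranges.append((stage, index, index + length - 1))
        index += length
    return ranges


# e.g. ["a", "a", "-1", "b", "b"] -> [("a", 0, 1), ("-1", 2, 2), ("b", 3, 4)]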
6
2023-11-13 05:27:34+00:00
24k
microsoft/SoM
task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py
[ { "identifier": "Visualizer", "path": "task_adapter/utils/visualizer.py", "snippet": "class Visualizer:\n \"\"\"\n Visualizer that draws data about detection/segmentation on images.\n\n It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`\n that draw primitive objects to images, as well as high-level wrappers like\n `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`\n that draw composite data in some pre-defined style.\n\n Note that the exact visualization style for the high-level wrappers are subject to change.\n Style such as color, opacity, label contents, visibility of labels, or even the visibility\n of objects themselves (e.g. when the object is too small) may change according\n to different heuristics, as long as the results still look visually reasonable.\n\n To obtain a consistent style, you can implement custom drawing functions with the\n abovementioned primitive methods instead. If you need more customized visualization\n styles, you can process the data yourself following their format documented in\n tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not\n intend to satisfy everyone's preference on drawing styles.\n\n This visualizer focuses on high rendering quality rather than performance. It is not\n designed to be used for real-time applications.\n \"\"\"\n\n # TODO implement a fast, rasterized version using OpenCV\n\n def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):\n \"\"\"\n Args:\n img_rgb: a numpy array of shape (H, W, C), where H and W correspond to\n the height and width of the image respectively. C is the number of\n color channels. The image is required to be in RGB format since that\n is a requirement of the Matplotlib library. The image is also expected\n to be in the range [0, 255].\n metadata (Metadata): dataset metadata (e.g. class names and colors)\n instance_mode (ColorMode): defines one of the pre-defined style for drawing\n instances on an image.\n \"\"\"\n self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)\n if metadata is None:\n metadata = MetadataCatalog.get(\"__nonexist__\")\n self.metadata = metadata\n self.output = VisImage(self.img, scale=scale)\n self.cpu_device = torch.device(\"cpu\")\n\n # too small texts are useless, therefore clamp to 9\n self._default_font_size = max(\n np.sqrt(self.output.height * self.output.width) // 90, 10 // scale\n )\n self._default_font_size = 18\n self._instance_mode = instance_mode\n self.keypoint_threshold = _KEYPOINT_THRESHOLD\n\n import matplotlib.colors as mcolors\n css4_colors = mcolors.CSS4_COLORS\n self.color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()]\n\n def draw_instance_predictions(self, predictions):\n \"\"\"\n Draw instance-level prediction results on an image.\n\n Args:\n predictions (Instances): the output of an instance detection/segmentation\n model. 
Following fields will be used to draw:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\" (or \"pred_masks_rle\").\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes.tolist() if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n keep = (scores > 0.5).cpu()\n boxes = boxes[keep]\n scores = scores[keep]\n classes = np.array(classes)\n classes = classes[np.array(keep)]\n labels = np.array(labels)\n labels = labels[np.array(keep)]\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = masks[np.array(keep)]\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n # if self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes\n ]\n alpha = 0.4\n else:\n colors = None\n alpha = 0.4\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(\n self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n if predictions.has(\"pred_masks\")\n else None\n )\n )\n alpha = 0.3\n \n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output\n\n def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw semantic segmentation predictions/labels.\n\n Args:\n sem_seg (Tensor or ndarray): the segmentation of shape (H, W).\n Each value is the integer label of the pixel.\n area_threshold (int): segments with less than `area_threshold` are not drawn.\n alpha (float): the larger it is, the more opaque the segmentations are.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n if isinstance(sem_seg, torch.Tensor):\n sem_seg = sem_seg.numpy()\n labels, areas = np.unique(sem_seg, return_counts=True)\n sorted_idxs = np.argsort(-areas).tolist()\n labels = labels[sorted_idxs]\n for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]\n except (AttributeError, IndexError):\n mask_color = None\n\n binary_mask = (sem_seg == label).astype(np.uint8)\n text = self.metadata.stuff_classes[label]\n self.draw_binary_mask(\n binary_mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n return self.output\n\n def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw panoptic prediction annotations or results.\n\n Args:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each\n segment.\n segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.\n If it is a ``list[dict]``, each dict contains keys \"id\", \"category_id\".\n If None, category id of each pixel is computed by\n ``pixel // metadata.label_divisor``.\n area_threshold (int): stuff segments with less than `area_threshold` are not drawn.\n\n Returns:\n output (VisImage): image object with 
visualizations.\n \"\"\"\n pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))\n\n # draw mask for all semantic segments first i.e. \"stuff\"\n for mask, sinfo in pred.semantic_masks():\n category_idx = sinfo[\"category_id\"]\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]\n except AttributeError:\n mask_color = None\n\n text = self.metadata.stuff_classes[category_idx].replace('-other','').replace('-merged','')\n self.draw_binary_mask(\n mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n\n # draw mask for all instances second\n all_instances = list(pred.instance_masks())\n if len(all_instances) == 0:\n return self.output\n masks, sinfo = list(zip(*all_instances))\n category_ids = [x[\"category_id\"] for x in sinfo]\n\n try:\n scores = [x[\"score\"] for x in sinfo]\n except KeyError:\n scores = None\n class_names = [name.replace('-other','').replace('-merged','') for name in self.metadata.thing_classes]\n labels = _create_text_labels(\n category_ids, scores, class_names, [x.get(\"iscrowd\", 0) for x in sinfo]\n )\n\n try:\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids\n ]\n except AttributeError:\n colors = None\n self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)\n\n return self.output\n\n draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility\n\n def draw_dataset_dict(self, dic):\n \"\"\"\n Draw annotations/segmentaions in Detectron2 Dataset format.\n\n Args:\n dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n annos = dic.get(\"annotations\", None)\n if annos:\n if \"segmentation\" in annos[0]:\n masks = [x[\"segmentation\"] for x in annos]\n else:\n masks = None\n if \"keypoints\" in annos[0]:\n keypts = [x[\"keypoints\"] for x in annos]\n keypts = np.array(keypts).reshape(len(annos), -1, 3)\n else:\n keypts = None\n\n boxes = [\n BoxMode.convert(x[\"bbox\"], x[\"bbox_mode\"], BoxMode.XYXY_ABS)\n if len(x[\"bbox\"]) == 4\n else x[\"bbox\"]\n for x in annos\n ]\n\n colors = None\n category_ids = [x[\"category_id\"] for x in annos]\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]])\n for c in category_ids\n ]\n names = self.metadata.get(\"thing_classes\", None)\n labels = _create_text_labels(\n category_ids,\n scores=None,\n class_names=names,\n is_crowd=[x.get(\"iscrowd\", 0) for x in annos],\n )\n self.overlay_instances(\n labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors\n )\n\n sem_seg = dic.get(\"sem_seg\", None)\n if sem_seg is None and \"sem_seg_file_name\" in dic:\n with PathManager.open(dic[\"sem_seg_file_name\"], \"rb\") as f:\n sem_seg = Image.open(f)\n sem_seg = np.asarray(sem_seg, dtype=\"uint8\")\n if sem_seg is not None:\n self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.4)\n\n pan_seg = dic.get(\"pan_seg\", None)\n if pan_seg is None and \"pan_seg_file_name\" in dic:\n with PathManager.open(dic[\"pan_seg_file_name\"], \"rb\") as f:\n pan_seg = Image.open(f)\n pan_seg = np.asarray(pan_seg)\n from panopticapi.utils import rgb2id\n\n pan_seg = rgb2id(pan_seg)\n 
if pan_seg is not None:\n segments_info = dic[\"segments_info\"]\n pan_seg = torch.tensor(pan_seg)\n self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.7)\n return self.output\n\n def overlay_instances(\n self,\n *,\n boxes=None,\n labels=None,\n masks=None,\n keypoints=None,\n assigned_colors=None,\n alpha=0.5,\n ):\n \"\"\"\n Args:\n boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,\n or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,\n or a :class:`RotatedBoxes`,\n or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image,\n labels (list[str]): the text to be displayed for each instance.\n masks (masks-like object): Supported types are:\n\n * :class:`detectron2.structures.PolygonMasks`,\n :class:`detectron2.structures.BitMasks`.\n * list[list[ndarray]]: contains the segmentation masks for all objects in one image.\n The first level of the list corresponds to individual instances. The second\n level to all the polygon that compose the instance, and the third level\n to the polygon coordinates. The third level should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n * list[ndarray]: each ndarray is a binary mask of shape (H, W).\n * list[dict]: each dict is a COCO-style RLE.\n keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),\n where the N is the number of instances and K is the number of keypoints.\n The last dimension corresponds to (x, y, visibility or score).\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = 0\n if boxes is not None:\n boxes = self._convert_boxes(boxes)\n num_instances = len(boxes)\n if masks is not None:\n masks = self._convert_masks(masks)\n if num_instances:\n assert len(masks) == num_instances\n else:\n num_instances = len(masks)\n if keypoints is not None:\n if num_instances:\n assert len(keypoints) == num_instances\n else:\n num_instances = len(keypoints)\n keypoints = self._convert_keypoints(keypoints)\n if labels is not None:\n assert len(labels) == num_instances\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n if boxes is not None and boxes.shape[1] == 5:\n return self.overlay_rotated_instances(\n boxes=boxes, labels=labels, assigned_colors=assigned_colors\n )\n\n # Display in largest to smallest order to reduce occlusion.\n areas = None\n if boxes is not None:\n areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)\n elif masks is not None:\n areas = np.asarray([x.area() for x in masks])\n\n if areas is not None:\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs] if boxes is not None else None\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None\n assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]\n keypoints = keypoints[sorted_idxs] if keypoints is not None else None\n\n for i in range(num_instances):\n color = assigned_colors[i]\n if boxes is not None:\n self.draw_box(boxes[i], edge_color=color)\n\n if masks is not None:\n for segment in 
masks[i].polygons:\n self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)\n\n if labels is not None:\n # first get a box\n if boxes is not None:\n x0, y0, x1, y1 = boxes[i]\n text_pos = (x0, y0) # if drawing boxes, put text on the box corner.\n horiz_align = \"left\"\n elif masks is not None:\n # skip small mask without polygon\n if len(masks[i].polygons) == 0:\n continue\n\n x0, y0, x1, y1 = masks[i].bbox()\n\n # draw text in the center (defined by median) when box is not drawn\n # median is less sensitive to outliers.\n text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]\n horiz_align = \"center\"\n else:\n continue # drawing the box confidence for keypoints isn't very useful.\n # for small objects, draw text at the side to avoid occlusion\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale\n or y1 - y0 < 40 * self.output.scale\n ):\n if y1 >= self.output.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n self.draw_text(\n labels[i],\n text_pos,\n color=lighter_color,\n horizontal_alignment=horiz_align,\n font_size=font_size,\n )\n\n # draw keypoints\n if keypoints is not None:\n for keypoints_per_instance in keypoints:\n self.draw_and_connect_keypoints(keypoints_per_instance)\n\n return self.output\n\n def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):\n \"\"\"\n Args:\n boxes (ndarray): an Nx5 numpy array of\n (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image.\n labels (list[str]): the text to be displayed for each instance.\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = len(boxes)\n\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n\n # Display in largest to smallest order to reduce occlusion.\n if boxes is not None:\n areas = boxes[:, 2] * boxes[:, 3]\n\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs]\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n colors = [assigned_colors[idx] for idx in sorted_idxs]\n\n for i in range(num_instances):\n self.draw_rotated_box_with_label(\n boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None\n )\n\n return self.output\n\n def draw_and_connect_keypoints(self, keypoints):\n \"\"\"\n Draws keypoints of an instance and follows the rules for keypoint connections\n to draw lines between appropriate keypoints. 
This follows color heuristics for\n line color.\n\n Args:\n keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints\n and the last dimension corresponds to (x, y, probability).\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n visible = {}\n keypoint_names = self.metadata.get(\"keypoint_names\")\n for idx, keypoint in enumerate(keypoints):\n\n # draw keypoint\n x, y, prob = keypoint\n if prob > self.keypoint_threshold:\n self.draw_circle((x, y), color=_RED)\n if keypoint_names:\n keypoint_name = keypoint_names[idx]\n visible[keypoint_name] = (x, y)\n\n if self.metadata.get(\"keypoint_connection_rules\"):\n for kp0, kp1, color in self.metadata.keypoint_connection_rules:\n if kp0 in visible and kp1 in visible:\n x0, y0 = visible[kp0]\n x1, y1 = visible[kp1]\n color = tuple(x / 255.0 for x in color)\n self.draw_line([x0, x1], [y0, y1], color=color)\n\n # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip\n # Note that this strategy is specific to person keypoints.\n # For other keypoints, it should just do nothing\n try:\n ls_x, ls_y = visible[\"left_shoulder\"]\n rs_x, rs_y = visible[\"right_shoulder\"]\n mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2\n except KeyError:\n pass\n else:\n # draw line from nose to mid-shoulder\n nose_x, nose_y = visible.get(\"nose\", (None, None))\n if nose_x is not None:\n self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)\n\n try:\n # draw line from mid-shoulder to mid-hip\n lh_x, lh_y = visible[\"left_hip\"]\n rh_x, rh_y = visible[\"right_hip\"]\n except KeyError:\n pass\n else:\n mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2\n self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)\n return self.output\n\n \"\"\"\n Primitive drawing functions:\n \"\"\"\n\n def draw_text(\n self,\n text,\n position,\n *,\n font_size=None,\n color=\"g\",\n horizontal_alignment=\"center\",\n rotation=0,\n ):\n \"\"\"\n Args:\n text (str): class label\n position (tuple): a tuple of the x and y coordinates to place text on image.\n font_size (int, optional): font of the text. If not provided, a font size\n proportional to the image width is calculated and used.\n color: color of the text. Refer to `matplotlib.colors` for full list\n of formats that are accepted.\n horizontal_alignment (str): see `matplotlib.text.Text`\n rotation: rotation angle in degrees CCW\n\n Returns:\n output (VisImage): image object with text drawn.\n \"\"\"\n if not font_size:\n font_size = self._default_font_size\n\n # since the text background is dark, we don't want the text to be dark\n color = np.maximum(list(mplc.to_rgb(color)), 0.15)\n color[np.argmax(color)] = max(0.8, np.max(color))\n\n def contrasting_color(rgb):\n \"\"\"Returns 'white' or 'black' depending on which color contrasts more with the given RGB value.\"\"\"\n \n # Decompose the RGB tuple\n R, G, B = rgb\n\n # Calculate the Y value\n Y = 0.299 * R + 0.587 * G + 0.114 * B\n\n # If Y value is greater than 128, it's closer to white so return black. 
Otherwise, return white.\n return 'black' if Y > 128 else 'white'\n\n bbox_background = contrasting_color(color*255)\n\n x, y = position\n self.output.ax.text(\n x,\n y,\n text,\n size=font_size * self.output.scale,\n family=\"sans-serif\",\n bbox={\"facecolor\": bbox_background, \"alpha\": 0.8, \"pad\": 0.7, \"edgecolor\": \"none\"},\n verticalalignment=\"top\",\n horizontalalignment=horizontal_alignment,\n color=color,\n zorder=10,\n rotation=rotation,\n )\n return self.output\n\n def draw_box(self, box_coord, alpha=0.5, edge_color=\"g\", line_style=\"-\"):\n \"\"\"\n Args:\n box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0\n are the coordinates of the image's top left corner. x1 and y1 are the\n coordinates of the image's bottom right corner.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x0, y0, x1, y1 = box_coord\n width = x1 - x0\n height = y1 - y0\n\n linewidth = max(self._default_font_size / 12, 1)\n\n self.output.ax.add_patch(\n mpl.patches.Rectangle(\n (x0, y0),\n width,\n height,\n fill=False,\n edgecolor=edge_color,\n linewidth=linewidth * self.output.scale,\n alpha=alpha,\n linestyle=line_style,\n )\n )\n return self.output\n\n def draw_rotated_box_with_label(\n self, rotated_box, alpha=0.5, edge_color=\"g\", line_style=\"-\", label=None\n ):\n \"\"\"\n Draw a rotated box with label on its top-left corner.\n\n Args:\n rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),\n where cnt_x and cnt_y are the center coordinates of the box.\n w and h are the width and height of the box. angle represents how\n many degrees the box is rotated CCW with regard to the 0-degree box.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n label (string): label for rotated box. 
It will not be rendered when set to None.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n cnt_x, cnt_y, w, h, angle = rotated_box\n area = w * h\n # use thinner lines when the box is small\n linewidth = self._default_font_size / (\n 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3\n )\n\n theta = angle * math.pi / 180.0\n c = math.cos(theta)\n s = math.sin(theta)\n rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]\n # x: left->right ; y: top->down\n rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]\n for k in range(4):\n j = (k + 1) % 4\n self.draw_line(\n [rotated_rect[k][0], rotated_rect[j][0]],\n [rotated_rect[k][1], rotated_rect[j][1]],\n color=edge_color,\n linestyle=\"--\" if k == 1 else line_style,\n linewidth=linewidth,\n )\n\n if label is not None:\n text_pos = rotated_rect[1] # topleft corner\n\n height_ratio = h / np.sqrt(self.output.height * self.output.width)\n label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size\n )\n self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)\n\n return self.output\n\n def draw_circle(self, circle_coord, color, radius=3):\n \"\"\"\n Args:\n circle_coord (list(int) or tuple(int)): contains the x and y coordinates\n of the center of the circle.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n radius (int): radius of the circle.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x, y = circle_coord\n self.output.ax.add_patch(\n mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)\n )\n return self.output\n\n def draw_line(self, x_data, y_data, color, linestyle=\"-\", linewidth=None):\n \"\"\"\n Args:\n x_data (list[int]): a list containing x values of all the points being drawn.\n Length of list should match the length of y_data.\n y_data (list[int]): a list containing y values of all the points being drawn.\n Length of list should match the length of x_data.\n color: color of the line. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n linestyle: style of the line. Refer to `matplotlib.lines.Line2D`\n for a full list of formats that are accepted.\n linewidth (float or None): width of the line. When it's None,\n a default value will be computed and used.\n\n Returns:\n output (VisImage): image object with line drawn.\n \"\"\"\n if linewidth is None:\n linewidth = self._default_font_size / 3\n linewidth = max(linewidth, 1)\n self.output.ax.add_line(\n mpl.lines.Line2D(\n x_data,\n y_data,\n linewidth=linewidth * self.output.scale,\n color=color,\n linestyle=linestyle,\n )\n )\n return self.output\n\n def draw_binary_mask(\n self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.7, area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n has_valid_segment = False\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None and has_valid_segment:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n \n def draw_binary_mask_with_number(\n self, binary_mask, color=None, *, edge_color=None, text=None, label_mode='1', alpha=0.1, anno_mode=['Mask'], area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n randint = random.randint(0, len(self.color_proposals)-1)\n color = self.color_proposals[randint]\n color = mplc.to_rgb(color)\n\n has_valid_segment = True\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n bbox = mask.bbox()\n\n if 'Mask' in anno_mode:\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if 'Box' in anno_mode:\n self.draw_box(bbox, edge_color=color, alpha=0.75)\n\n if 'Mark' in anno_mode:\n has_valid_segment = True\n else:\n has_valid_segment = False\n\n if text is not None and has_valid_segment:\n # lighter_color = tuple([x*0.2 for x in color])\n lighter_color = [1,1,1] # self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_number_in_mask(binary_mask, text, lighter_color, label_mode)\n return self.output\n\n def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):\n \"\"\"\n Args:\n soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n shape2d = (soft_mask.shape[0], soft_mask.shape[1])\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = soft_mask * alpha\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n binary_mask = (soft_mask > 0.5).astype(\"uint8\")\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n\n def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):\n \"\"\"\n Args:\n segment: numpy array of shape Nx2, containing all the points in the polygon.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted. If not provided, a darker shade\n of the polygon color will be used instead.\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with polygon drawn.\n \"\"\"\n if edge_color is None:\n # make edge color darker than the polygon color\n if alpha > 0.8:\n edge_color = self._change_color_brightness(color, brightness_factor=-0.7)\n else:\n edge_color = color\n edge_color = mplc.to_rgb(edge_color) + (1,)\n\n polygon = mpl.patches.Polygon(\n segment,\n fill=True,\n facecolor=mplc.to_rgb(color) + (alpha,),\n edgecolor=edge_color,\n linewidth=max(self._default_font_size // 15 * self.output.scale, 1),\n )\n self.output.ax.add_patch(polygon)\n return self.output\n\n \"\"\"\n Internal methods:\n \"\"\"\n\n def _jitter(self, color):\n \"\"\"\n Randomly modifies given color to produce a slightly different color than the color given.\n\n Args:\n color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color\n picked. The values in the list are in the [0.0, 1.0] range.\n\n Returns:\n jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the\n color after being jittered. The values in the list are in the [0.0, 1.0] range.\n \"\"\"\n color = mplc.to_rgb(color)\n # np.random.seed(0)\n vec = np.random.rand(3)\n # better to do it in another color space\n vec = vec / np.linalg.norm(vec) * 0.5\n res = np.clip(vec + color, 0, 1)\n return tuple(res)\n\n def _create_grayscale_image(self, mask=None):\n \"\"\"\n Create a grayscale version of the original image.\n The colors in masked area, if given, will be kept.\n \"\"\"\n img_bw = self.img.astype(\"f4\").mean(axis=2)\n img_bw = np.stack([img_bw] * 3, axis=2)\n if mask is not None:\n img_bw[mask] = self.img[mask]\n return img_bw\n\n def _change_color_brightness(self, color, brightness_factor):\n \"\"\"\n Depending on the brightness_factor, gives a lighter or darker color i.e. a color with\n less or more saturation than the original color.\n\n Args:\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of\n 0 will correspond to no change, a factor in [-1.0, 0) range will result in\n a darker color and a factor in (0, 1.0] range will result in a lighter color.\n\n Returns:\n modified_color (tuple[double]): a tuple containing the RGB values of the\n modified color. 
Each value in the tuple is in the [0.0, 1.0] range.\n \"\"\"\n assert brightness_factor >= -1.0 and brightness_factor <= 1.0\n color = mplc.to_rgb(color)\n polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))\n modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])\n modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness\n modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness\n modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])\n return modified_color\n\n def _convert_boxes(self, boxes):\n \"\"\"\n Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.\n \"\"\"\n if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):\n return boxes.tensor.detach().numpy()\n else:\n return np.asarray(boxes)\n\n def _convert_masks(self, masks_or_polygons):\n \"\"\"\n Convert different format of masks or polygons to a tuple of masks and polygons.\n\n Returns:\n list[GenericMask]:\n \"\"\"\n\n m = masks_or_polygons\n if isinstance(m, PolygonMasks):\n m = m.polygons\n if isinstance(m, BitMasks):\n m = m.tensor.numpy()\n if isinstance(m, torch.Tensor):\n m = m.numpy()\n ret = []\n for x in m:\n if isinstance(x, GenericMask):\n ret.append(x)\n else:\n ret.append(GenericMask(x, self.output.height, self.output.width))\n return ret\n\n def _draw_number_in_mask(self, binary_mask, text, color, label_mode='1'):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n\n def number_to_string(n):\n chars = []\n while n:\n n, remainder = divmod(n-1, 26)\n chars.append(chr(97 + remainder))\n return ''.join(reversed(chars))\n\n binary_mask = np.pad(binary_mask, ((1, 1), (1, 1)), 'constant')\n mask_dt = cv2.distanceTransform(binary_mask, cv2.DIST_L2, 0)\n mask_dt = mask_dt[1:-1, 1:-1]\n max_dist = np.max(mask_dt)\n coords_y, coords_x = np.where(mask_dt == max_dist) # coords is [y, x]\n\n if label_mode == 'a':\n text = number_to_string(int(text))\n else:\n text = text\n\n self.draw_text(text, (coords_x[len(coords_x)//2] + 2, coords_y[len(coords_y)//2] - 6), color=color)\n\n # TODO sometimes drawn on wrong objects. the heuristics here can improve.\n # _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n # if stats[1:, -1].size == 0:\n # return\n # largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # # draw text on the largest component, as well as other very large components.\n # for cid in range(1, _num_cc):\n # if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # # median is more stable than centroid\n # # center = centroids[largest_component_id]\n # center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n # # bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1]\n # # center[1]=bottom[1]+2\n # self.draw_text(text, center, color=color)\n \n def _draw_text_in_mask(self, binary_mask, text, color):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n # TODO sometimes drawn on wrong objects. 
the heuristics here can improve.\n _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n if stats[1:, -1].size == 0:\n return\n largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # draw text on the largest component, as well as other very large components.\n for cid in range(1, _num_cc):\n if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # median is more stable than centroid\n # center = centroids[largest_component_id]\n center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1]\n center[1]=bottom[1]+2\n self.draw_text(text, center, color=color)\n\n def _convert_keypoints(self, keypoints):\n if isinstance(keypoints, Keypoints):\n keypoints = keypoints.tensor\n keypoints = np.asarray(keypoints)\n return keypoints\n\n def get_output(self):\n \"\"\"\n Returns:\n output (VisImage): the image output containing the visualizations added\n to the image.\n \"\"\"\n return self.output" }, { "identifier": "SemanticSamAutomaticMaskGenerator", "path": "task_adapter/semantic_sam/tasks/automatic_mask_generator.py", "snippet": "class SemanticSamAutomaticMaskGenerator:\n def __init__(\n self,\n model,\n points_per_side: Optional[int] = 32,\n points_per_batch: int = 200,\n pred_iou_thresh: float = 0.88,\n stability_score_thresh: float = 0.92,\n stability_score_offset: float = 1.0,\n box_nms_thresh: float = 0.7,\n crop_n_layers: int = 0,\n crop_nms_thresh: float = 0.7,\n crop_overlap_ratio: float = 512 / 1500,\n crop_n_points_downscale_factor: int = 1,\n point_grids: Optional[List[np.ndarray]] = None,\n min_mask_region_area: int = 10,\n output_mode: str = \"binary_mask\",\n level: list = [1, 2, 3, 4, 5, 6],\n ) -> None:\n \"\"\"\n Using a SAM model, generates masks for the entire image.\n Generates a grid of point prompts over the image, then filters\n low quality and duplicate masks. The default settings are chosen\n for SAM with a ViT-H backbone.\n\n Arguments:\n model (Sam): The SAM model to use for mask prediction.\n points_per_side (int or None): The number of points to be sampled\n along one side of the image. The total number of points is\n points_per_side**2. If None, 'point_grids' must provide explicit\n point sampling.\n points_per_batch (int): Sets the number of points run simultaneously\n by the model. Higher numbers may be faster but use more GPU memory.\n pred_iou_thresh (float): A filtering threshold in [0,1], using the\n model's predicted mask quality.\n stability_score_thresh (float): A filtering threshold in [0,1], using\n the stability of the mask under changes to the cutoff used to binarize\n the model's mask predictions.\n stability_score_offset (float): The amount to shift the cutoff when\n calculated the stability score.\n box_nms_thresh (float): The box IoU cutoff used by non-maximal\n suppression to filter duplicate masks.\n crops_n_layers (int): If >0, mask prediction will be run again on\n crops of the image. Sets the number of layers to run, where each\n layer has 2**i_layer number of image crops.\n crops_nms_thresh (float): The box IoU cutoff used by non-maximal\n suppression to filter duplicate masks between different crops.\n crop_overlap_ratio (float): Sets the degree to which crops overlap.\n In the first crop layer, crops will overlap by this fraction of\n the image length. 
Later layers with more crops scale down this overlap.\n crop_n_points_downscale_factor (int): The number of points-per-side\n sampled in layer n is scaled down by crop_n_points_downscale_factor**n.\n point_grids (list(np.ndarray) or None): A list over explicit grids\n of points used for sampling, normalized to [0,1]. The nth grid in the\n list is used in the nth crop layer. Exclusive with points_per_side.\n min_mask_region_area (int): If >0, postprocessing will be applied\n to remove disconnected regions and holes in masks with area smaller\n than min_mask_region_area. Requires opencv.\n output_mode (str): The form masks are returned in. Can be 'binary_mask',\n 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.\n For large resolutions, 'binary_mask' may consume large amounts of\n memory.\n \"\"\"\n self.level = [prompt_switch(l) for l in level]\n assert (points_per_side is None) != (\n point_grids is None\n ), \"Exactly one of points_per_side or point_grid must be provided.\"\n if points_per_side is not None:\n self.point_grids = build_all_layer_point_grids(\n points_per_side,\n crop_n_layers,\n crop_n_points_downscale_factor,\n )\n elif point_grids is not None:\n self.point_grids = point_grids\n else:\n raise ValueError(\"Can't have both points_per_side and point_grid be None.\")\n\n assert output_mode in [\n \"binary_mask\",\n \"uncompressed_rle\",\n \"coco_rle\",\n ], f\"Unknown output_mode {output_mode}.\"\n if output_mode == \"coco_rle\":\n from pycocotools import mask as mask_utils # type: ignore # noqa: F401\n\n if min_mask_region_area > 0:\n import cv2 # type: ignore # noqa: F401\n\n self.predictor = model\n self.points_per_batch = points_per_batch\n self.pred_iou_thresh = pred_iou_thresh\n self.stability_score_thresh = stability_score_thresh\n self.stability_score_offset = stability_score_offset\n self.box_nms_thresh = box_nms_thresh\n self.crop_n_layers = crop_n_layers\n self.crop_nms_thresh = crop_nms_thresh\n self.crop_overlap_ratio = crop_overlap_ratio\n self.crop_n_points_downscale_factor = crop_n_points_downscale_factor\n self.min_mask_region_area = min_mask_region_area\n self.output_mode = output_mode\n\n @torch.no_grad()\n def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:\n \"\"\"\n Generates masks for the given image.\n\n Arguments:\n image (np.ndarray): The image to generate masks for, in HWC uint8 format.\n\n Returns:\n list(dict(str, any)): A list over records for masks. Each record is\n a dict containing the following keys:\n segmentation (dict(str, any) or np.ndarray): The mask. If\n output_mode='binary_mask', is an array of shape HW. Otherwise,\n is a dictionary containing the RLE.\n bbox (list(float)): The box around the mask, in XYWH format.\n area (int): The area in pixels of the mask.\n predicted_iou (float): The model's own prediction of the mask's\n quality. This is filtered by the pred_iou_thresh parameter.\n point_coords (list(list(float))): The point coordinates input\n to the model to generate this mask.\n stability_score (float): A measure of the mask's quality. 
This\n is filtered on using the stability_score_thresh parameter.\n crop_box (list(float)): The crop of the image used to generate\n the mask, given in XYWH format.\n \"\"\"\n\n # Generate masks\n mask_data = self._generate_masks(image)\n\n # Filter small disconnected regions and holes in masks\n if self.min_mask_region_area > 0:\n mask_data = self.postprocess_small_regions(\n mask_data,\n self.min_mask_region_area,\n max(self.box_nms_thresh, self.crop_nms_thresh),\n )\n # Encode masks\n if self.output_mode == \"coco_rle\":\n mask_data[\"segmentations\"] = [coco_encode_rle(rle) for rle in mask_data[\"rles\"]]\n elif self.output_mode == \"binary_mask\":\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n else:\n mask_data[\"segmentations\"] = mask_data[\"rles\"]\n\n # Write mask records\n curr_anns = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n curr_anns.append(ann)\n\n return curr_anns\n\n def _generate_masks(self, image: np.ndarray) -> MaskData:\n orig_size = image.shape[-2:]\n crop_boxes, layer_idxs = generate_crop_boxes(\n orig_size, self.crop_n_layers, self.crop_overlap_ratio\n )\n\n # Iterate over image crops\n assert len(crop_boxes)==1\n data = MaskData()\n # import ipdb; ipdb.set_trace()\n for crop_box, layer_idx in zip(crop_boxes, layer_idxs):\n crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)\n\n data.cat(crop_data)\n # import ipdb; ipdb.set_trace()\n # Remove duplicate masks between crops\n if len(crop_boxes) > 1:\n # Prefer masks from smaller crops\n scores = 1 / box_area(data[\"crop_boxes\"])\n scores = scores.to(data[\"boxes\"].device)\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n scores,\n torch.zeros(len(data[\"boxes\"])), # categories\n iou_threshold=self.crop_nms_thresh,\n )\n data.filter(keep_by_nms)\n\n data.to_numpy()\n return data\n\n def _process_crop(\n self,\n image: np.ndarray,\n crop_box: List[int],\n crop_layer_idx: int,\n orig_size: Tuple[int, ...],\n ) -> MaskData:\n # Crop the image and calculate embeddings\n x0, y0, x1, y1 = crop_box\n cropped_im = image#[y0:y1, x0:x1, :]\n cropped_im_size = cropped_im.shape[-2:]\n # self.predictor.set_image(cropped_im)\n\n # Get points for this crop\n points_scale = np.array(cropped_im_size)[None, ::-1]\n points_for_image = self.point_grids[crop_layer_idx] #* points_scale\n\n # Generate masks for this crop in batches\n data = MaskData()\n self.enc_features=None\n # import ipdb; ipdb.set_trace()\n for (points,) in batch_iterator(self.points_per_batch, points_for_image):\n batch_data = self._process_batch(cropped_im,points, cropped_im_size, crop_box, orig_size)\n data.cat(batch_data)\n del batch_data\n\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n data[\"iou_preds\"],\n torch.zeros(len(data[\"boxes\"])), # categories\n iou_threshold=self.box_nms_thresh,\n )\n # import ipdb; ipdb.set_trace()\n data.filter(keep_by_nms)\n # import ipdb; ipdb.set_trace()\n # Return to the original image frame\n data[\"boxes\"] = uncrop_boxes_xyxy(data[\"boxes\"], crop_box)\n data[\"crop_boxes\"] = 
torch.tensor([crop_box for _ in range(len(data[\"rles\"]))])\n\n return data\n\n def _process_batch(\n self,\n images,\n points: np.ndarray,\n im_size: Tuple[int, ...],\n crop_box: List[int],\n orig_size: Tuple[int, ...],\n ) -> MaskData:\n orig_h, orig_w = orig_size\n\n data = {\"image\": images, \"height\": orig_h, \"width\": orig_w}\n points=torch.tensor(points,dtype=torch.float).to(images.device)\n points = torch.cat([points, points.new_tensor([[0.005, 0.005]]).repeat(len(points), 1)], dim=-1)\n data['targets'] = [dict()]\n data['targets'][0]['points']=points\n data['targets'][0]['pb']=points.new_tensor([0.]*len(points))\n batch_inputs = [data]\n if self.enc_features is None:\n masks, iou_preds,mask_features,multi_scale_features= self.predictor.model.evaluate_demo(batch_inputs,None,None,return_features=True, level=self.level)\n self.enc_features=(mask_features,multi_scale_features)\n else:\n masks, iou_preds= self.predictor.model.evaluate_demo(batch_inputs,None,None,self.enc_features[0],self.enc_features[1], level=self.level)\n\n data = MaskData(\n masks=masks,\n iou_preds=iou_preds.flatten(),\n points=torch.as_tensor(points[:,None].repeat(1,len(self.level), 1).view(-1,4)),\n )\n del masks\n # Filter by predicted IoU\n keep_mask = data[\"iou_preds\"] > self.pred_iou_thresh\n data.filter(keep_mask)\n\n # Calculate stability score\n data[\"stability_score\"] = calculate_stability_score(\n data[\"masks\"], 0.0, self.stability_score_offset\n )\n # if self.stability_score_thresh > 0.0:\n keep_mask = data[\"stability_score\"] >= self.stability_score_thresh\n data.filter(keep_mask)\n\n # Threshold masks and calculate boxes\n data[\"masks\"] = data[\"masks\"] > 0.0\n data[\"boxes\"] = batched_mask_to_box(data[\"masks\"])\n\n # Filter boxes that touch crop boundaries\n keep_mask = ~is_box_near_crop_edge(data[\"boxes\"], crop_box, [0, 0, orig_w, orig_h])\n if not torch.all(keep_mask):\n data.filter(keep_mask)\n\n # Compress to RLE\n data[\"masks\"] = uncrop_masks(data[\"masks\"], crop_box, orig_h, orig_w)\n data[\"rles\"] = mask_to_rle_pytorch(data[\"masks\"])\n del data[\"masks\"]\n\n return data\n\n @staticmethod\n def postprocess_small_regions(\n mask_data: MaskData, min_area: int, nms_thresh: float\n ) -> MaskData:\n \"\"\"\n Removes small disconnected regions and holes in masks, then reruns\n box NMS to remove any new duplicates.\n\n Edits mask_data in place.\n\n Requires open-cv as a dependency.\n \"\"\"\n if len(mask_data[\"rles\"]) == 0:\n return mask_data\n\n # Filter small disconnected regions and holes\n new_masks = []\n scores = []\n for rle in mask_data[\"rles\"]:\n mask = rle_to_mask(rle)\n\n mask, changed = remove_small_regions(mask, min_area, mode=\"holes\")\n unchanged = not changed\n mask, changed = remove_small_regions(mask, min_area, mode=\"islands\")\n unchanged = unchanged and not changed\n\n new_masks.append(torch.as_tensor(mask).unsqueeze(0))\n # Give score=0 to changed masks and score=1 to unchanged masks\n # so NMS will prefer ones that didn't need postprocessing\n scores.append(float(unchanged))\n\n # Recalculate boxes and remove any new duplicates\n masks = torch.cat(new_masks, dim=0)\n boxes = batched_mask_to_box(masks)\n keep_by_nms = batched_nms(\n boxes.float(),\n torch.as_tensor(scores),\n torch.zeros(len(boxes)), # categories\n iou_threshold=nms_thresh,\n )\n\n # Only recalculate RLEs for masks that have changed\n for i_mask in keep_by_nms:\n if scores[i_mask] == 0.0:\n mask_torch = masks[i_mask].unsqueeze(0)\n mask_data[\"rles\"][i_mask] = 
mask_to_rle_pytorch(mask_torch)[0]\n mask_data[\"boxes\"][i_mask] = boxes[i_mask] # update res directly\n mask_data.filter(keep_by_nms)\n\n return mask_data" } ]
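The postprocess_small_regions step in the context snippet above leans on a remove_small_regions helper that the snippet only references. Below is a minimal, self-contained sketch of that hole/island cleanup, assuming OpenCV (which the snippet's docstring names as the dependency) and a boolean H*W input mask; the function name is illustrative and this is a simplified sketch, not the repository's exact implementation (for instance, it omits the keep-largest fallback when every island is small):

import numpy as np
import cv2

def remove_small_regions_sketch(mask: np.ndarray, min_area: int, mode: str):
    # mask: boolean H*W array. Returns (mask, changed).
    assert mode in ("holes", "islands")
    # For "holes" we label components of the inverted mask, so background
    # pockets inside the mask become foreground components we can measure.
    working = (~mask if mode == "holes" else mask).astype(np.uint8)
    n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working, connectivity=8)
    # stats[i, cv2.CC_STAT_AREA] is the pixel count of component i; label 0 is background.
    small = [i for i in range(1, n_labels) if stats[i, cv2.CC_STAT_AREA] < min_area]
    if not small:
        return mask, False
    fill = np.isin(regions, small)
    # Fill small holes back in, or drop small islands, depending on mode.
    return (mask | fill) if mode == "holes" else (mask & ~fill), True

As in the snippet, masks changed by this cleanup are then scored 0 (versus 1 for untouched masks) before the NMS rerun, so deduplication prefers masks that needed no repair.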
import torch import numpy as np import matplotlib.pyplot as plt import cv2 import io from torchvision import transforms from task_adapter.utils.visualizer import Visualizer from typing import Tuple from PIL import Image from detectron2.data import MetadataCatalog from .automatic_mask_generator import SemanticSamAutomaticMaskGenerator
15,586
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang ([email protected]) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32, pred_iou_thresh=0.88, stability_score_thresh=0.92, min_mask_region_area=10, level=level, ) outputs = mask_generator.generate(images)
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang ([email protected]) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda() mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32, pred_iou_thresh=0.88, stability_score_thresh=0.92, min_mask_region_area=10, level=level, ) outputs = mask_generator.generate(images)
visual = Visualizer(image_ori, metadata=metadata)
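For orientation, the next_line above begins the visualization half of the function. A hedged sketch of how generate()'s annotation records (keys such as 'segmentation' and 'area', per the context snippet) are commonly overlaid, drawing large masks first so small ones remain visible; the function name and blending scheme here are illustrative, not taken from the repository:

import numpy as np

def overlay_annotations(image: np.ndarray, anns: list, alpha: float = 0.35) -> np.ndarray:
    # anns: records with 'segmentation' as a boolean H*W array
    # (i.e. output_mode='binary_mask') and an integer 'area'.
    canvas = image.astype(np.float32).copy()
    for ann in sorted(anns, key=lambda a: a["area"], reverse=True):
        seg = ann["segmentation"]
        color = np.random.randint(0, 256, size=3).astype(np.float32)
        # Alpha-blend a random color over the masked pixels.
        canvas[seg] = (1 - alpha) * canvas[seg] + alpha * color
    return canvas.astype(np.uint8)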
0
2023-10-16 03:39:26+00:00
24k
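The stability_score_thresh=0.92 used in this record filters proposals by how stable a mask is under threshold jitter. A short sketch of that score as implied by calculate_stability_score in the context snippet: the IoU between the mask binarized at (threshold + offset) and at (threshold - offset); since the high-threshold mask is contained in the low-threshold one, the IoU reduces to an area ratio. The function name is illustrative:

import torch

def stability_score_sketch(mask_logits: torch.Tensor, thresh: float, offset: float) -> torch.Tensor:
    # mask_logits: (N, H, W) raw mask predictions; returns one score per mask.
    high = (mask_logits > (thresh + offset)).flatten(1).sum(-1).float()  # intersection area
    low = (mask_logits > (thresh - offset)).flatten(1).sum(-1).float()   # union area
    return high / low.clamp(min=1.0)  # near 1.0 when the boundary barely moves; clamp guards empty masks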
hkchengrex/Cutie
gui/main_controller.py
[ { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = 
msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. 
We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "InferenceCore", "path": "cutie/inference/inference_core.py", "snippet": "class InferenceCore:\n def __init__(self,\n network: CUTIE,\n cfg: DictConfig,\n *,\n image_feature_store: ImageFeatureStore = None):\n self.network = network\n self.cfg = cfg\n self.mem_every = cfg.mem_every\n stagger_updates = cfg.stagger_updates\n self.chunk_size = cfg.chunk_size\n self.save_aux = cfg.save_aux\n self.max_internal_size = cfg.max_internal_size\n self.flip_aug = cfg.flip_aug\n\n self.curr_ti = -1\n self.last_mem_ti = 0\n # at which time indices should we update the sensory memory\n if stagger_updates >= self.mem_every:\n self.stagger_ti = set(range(1, self.mem_every + 1))\n else:\n self.stagger_ti = set(\n np.round(np.linspace(1, 
self.mem_every, stagger_updates)).astype(int))\n self.object_manager = ObjectManager()\n self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager)\n\n if image_feature_store is None:\n self.image_feature_store = ImageFeatureStore(self.network)\n else:\n self.image_feature_store = image_feature_store\n\n self.last_mask = None\n\n def clear_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager)\n\n def clear_non_permanent_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_sensory_memory()\n\n def update_config(self, cfg):\n self.mem_every = cfg['mem_every']\n self.memory.update_config(cfg)\n\n def _add_memory(self,\n image: torch.Tensor,\n pix_feat: torch.Tensor,\n prob: torch.Tensor,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n selection: torch.Tensor,\n *,\n is_deep_update: bool = True,\n force_permanent: bool = False) -> None:\n \"\"\"\n Memorize the given segmentation in all memory stores.\n\n The batch dimension is 1 if flip augmentation is not used.\n image: RGB image, (1/2)*3*H*W\n pix_feat: from the key encoder, (1/2)*_*H*W\n prob: (1/2)*num_objects*H*W, in [0, 1]\n key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W\n selection can be None if not using long-term memory\n is_deep_update: whether to use deep update (e.g. with the mask encoder)\n force_permanent: whether to force the memory to be permanent\n \"\"\"\n if prob.shape[1] == 0:\n # nothing to add\n log.warn('Trying to add an empty object mask to memory!')\n return\n\n if force_permanent:\n as_permanent = 'all'\n else:\n as_permanent = 'first'\n\n self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids)\n msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask(\n image,\n pix_feat,\n self.memory.get_sensory(self.object_manager.all_obj_ids),\n prob,\n deep_update=is_deep_update,\n chunk_size=self.chunk_size,\n need_weights=self.save_aux)\n self.memory.add_memory(key,\n shrinkage,\n msk_value,\n obj_value,\n self.object_manager.all_obj_ids,\n selection=selection,\n as_permanent=as_permanent)\n self.last_mem_ti = self.curr_ti\n if is_deep_update:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n\n def _segment(self,\n key: torch.Tensor,\n selection: torch.Tensor,\n pix_feat: torch.Tensor,\n ms_features: Iterable[torch.Tensor],\n update_sensory: bool = True) -> torch.Tensor:\n \"\"\"\n Produce a segmentation using the given features and the memory\n\n The batch dimension is 1 if flip augmentation is not used.\n key/selection: for anisotropic l2: (1/2) * _ * H * W\n pix_feat: from the key encoder, (1/2) * _ * H * W\n ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W\n with strides 16, 8, and 4 respectively\n update_sensory: whether to update the sensory memory\n\n Returns: (num_objects+1)*H*W normalized probability; the first channel is the background\n \"\"\"\n bs = key.shape[0]\n if self.flip_aug:\n assert bs == 2\n else:\n assert bs == 1\n\n if not self.memory.engaged:\n log.warn('Trying to segment without any memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n\n memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network)\n memory_readout = 
self.object_manager.realize_dict(memory_readout)\n sensory, _, pred_prob_with_bg = self.network.segment(ms_features,\n memory_readout,\n self.memory.get_sensory(\n self.object_manager.all_obj_ids),\n chunk_size=self.chunk_size,\n update_sensory=update_sensory)\n # remove batch dim\n if self.flip_aug:\n # average predictions of the non-flipped and flipped version\n pred_prob_with_bg = (pred_prob_with_bg[0] +\n torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2\n else:\n pred_prob_with_bg = pred_prob_with_bg[0]\n if update_sensory:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n return pred_prob_with_bg\n\n def step(self,\n image: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n objects: Optional[List[int]] = None,\n *,\n idx_mask: bool = True,\n end: bool = False,\n delete_buffer: bool = True,\n force_permanent: bool = False) -> torch.Tensor:\n \"\"\"\n Take a step with a new incoming image.\n If there is an incoming mask with new objects, we will memorize them.\n If there is no incoming mask, we will segment the image using the memory.\n In both cases, we will update the memory and return a segmentation.\n\n image: 3*H*W\n mask: H*W (if idx mask) or len(objects)*H*W or None\n objects: list of object ids that are valid in the mask Tensor.\n The ids themselves do not need to be consecutive/in order, but they need to be \n in the same position in the list as the corresponding mask\n in the tensor in non-idx-mask mode.\n objects is ignored if the mask is None. \n If idx_mask is False and objects is None, we sequentially infer the object ids.\n idx_mask: if True, mask is expected to contain an object id at every pixel.\n If False, mask should have multiple channels with each channel representing one object.\n end: if we are at the end of the sequence, we do not need to update memory\n if unsure just set it to False \n delete_buffer: whether to delete the image feature buffer after this step\n force_permanent: the memory recorded this frame will be added to the permanent memory\n \"\"\"\n if objects is None and mask is not None:\n assert not idx_mask\n objects = list(range(1, mask.shape[0] + 1))\n\n # resize input if needed -- currently only used for the GUI\n resize_needed = False\n if self.max_internal_size > 0:\n h, w = image.shape[-2:]\n min_side = min(h, w)\n if min_side > self.max_internal_size:\n resize_needed = True\n new_h = int(h / min_side * self.max_internal_size)\n new_w = int(w / min_side * self.max_internal_size)\n image = F.interpolate(image.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n if mask is not None:\n if idx_mask:\n mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(),\n size=(new_h, new_w),\n mode='nearest',\n align_corners=False)[0, 0].round().long()\n else:\n mask = F.interpolate(mask.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n\n self.curr_ti += 1\n\n image, self.pad = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n if self.flip_aug:\n image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0)\n\n # whether to update the working memory\n is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or\n (mask is not None)) and (not end)\n # segment when there is no input mask or when the input mask is incomplete\n need_segment = (mask is None) or (self.object_manager.num_obj > 0\n and not self.object_manager.has_all(objects))\n update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not 
end)\n\n # encoding the image\n ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image)\n\n # segmentation from memory if needed\n if need_segment:\n pred_prob_with_bg = self._segment(key,\n selection,\n pix_feat,\n ms_feat,\n update_sensory=update_sensory)\n\n # use the input mask if provided\n if mask is not None:\n # inform the manager of the new objects, and get a list of temporary id\n # temporary ids -- indicates the position of objects in the tensor\n # (starts with 1 due to the background channel)\n corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects)\n\n mask, _ = pad_divide_by(mask, 16)\n if need_segment:\n # merge predicted mask with the incomplete input mask\n pred_prob_no_bg = pred_prob_with_bg[1:]\n # use the mutual exclusivity of segmentation\n if idx_mask:\n pred_prob_no_bg[:, mask > 0] = 0\n else:\n pred_prob_no_bg[:, mask.max(0) > 0.5] = 0\n\n new_masks = []\n for mask_id, tmp_id in enumerate(corresponding_tmp_ids):\n if idx_mask:\n this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg)\n else:\n this_mask = mask[tmp_id]\n if tmp_id > pred_prob_no_bg.shape[0]:\n new_masks.append(this_mask.unsqueeze(0))\n else:\n # +1 for padding the background channel\n pred_prob_no_bg[tmp_id - 1] = this_mask\n # new_masks are always in the order of tmp_id\n mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0)\n elif idx_mask:\n # simply convert cls to one-hot representation\n if len(objects) == 0:\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n log.warn('Trying to insert an empty mask as memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n mask = torch.stack(\n [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)],\n dim=0)\n pred_prob_with_bg = aggregate(mask, dim=0)\n pred_prob_with_bg = torch.softmax(pred_prob_with_bg, dim=0)\n\n self.last_mask = pred_prob_with_bg[1:].unsqueeze(0)\n if self.flip_aug:\n self.last_mask = torch.cat(\n [self.last_mask, torch.flip(self.last_mask, dims=[-1])], dim=0)\n\n # save as memory if needed\n if is_mem_frame or force_permanent:\n self._add_memory(image,\n pix_feat,\n self.last_mask,\n key,\n shrinkage,\n selection,\n force_permanent=force_permanent)\n\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n\n output_prob = unpad(pred_prob_with_bg, self.pad)\n if resize_needed:\n # restore output to the original size\n output_prob = F.interpolate(output_prob.unsqueeze(0),\n size=(h, w),\n mode='bilinear',\n align_corners=False)[0]\n\n return output_prob\n\n def get_aux_outputs(self, image: torch.Tensor) -> Dict[str, torch.Tensor]:\n image, pads = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n _, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n\n aux_inputs = self.memory.aux\n aux_outputs = self.network.compute_aux(pix_feat, aux_inputs, selector=None)\n aux_outputs['q_weights'] = aux_inputs['q_weights']\n aux_outputs['p_weights'] = aux_inputs['p_weights']\n\n for k, v in aux_outputs.items():\n if len(v.shape) == 5:\n aux_outputs[k] = F.interpolate(v[0],\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n elif 'weights' in k:\n b, num_objects, num_heads, num_queries, h, w = v.shape\n v = v.view(num_objects * num_heads, num_queries, h, w)\n v = F.interpolate(v, size=image.shape[-2:], mode='bilinear', align_corners=False)\n 
aux_outputs[k] = v.view(num_objects, num_heads, num_queries, *image.shape[-2:])\n else:\n aux_outputs[k] = F.interpolate(v,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)[0]\n aux_outputs[k] = unpad(aux_outputs[k], pads)\n if 'weights' in k:\n weights = aux_outputs[k]\n weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0] +\n 1e-8)\n aux_outputs[k] = (weights * 255).cpu().numpy()\n else:\n aux_outputs[k] = (aux_outputs[k].softmax(dim=0) * 255).cpu().numpy()\n\n self.image_feature_store.delete(self.curr_ti)\n return aux_outputs\n\n def get_aux_object_weights(self, image: torch.Tensor) -> np.ndarray:\n image, pads = pad_divide_by(image, 16)\n # B*num_objects*H*W*num_queries -> num_objects*num_queries*H*W\n # weights = F.softmax(self.obj_logits, dim=-1)[0]\n weights = F.sigmoid(self.obj_logits)[0]\n weights = weights.permute(0, 3, 1, 2).contiguous()\n weights = F.interpolate(weights,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n # weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0])\n weights = unpad(weights, pads)\n weights = (weights * 255).cpu().numpy()\n return weights" }, { "identifier": "ResourceManager", "path": "gui/resource_manager.py", "snippet": "class ResourceManager:\n def __init__(self, cfg: DictConfig):\n # determine inputs\n images = cfg['images']\n video = cfg['video']\n self.workspace = cfg['workspace']\n self.max_size = cfg['max_overall_size']\n self.palette = davis_palette\n\n # create temporary workspace if not specified\n if self.workspace is None:\n if images is not None:\n basename = path.basename(images)\n elif video is not None:\n basename = path.basename(video)[:-4]\n else:\n raise NotImplementedError('Either images, video, or workspace has to be specified')\n\n self.workspace = path.join('./workspace', basename)\n\n print(f'Workspace is in: {self.workspace}')\n with open_dict(cfg):\n cfg['workspace'] = self.workspace\n\n # determine the location of input images\n need_decoding = False\n need_resizing = False\n if path.exists(path.join(self.workspace, 'images')):\n pass\n elif images is not None:\n need_resizing = True\n elif video is not None:\n # will decode video into frames later\n need_decoding = True\n\n # create workspace subdirectories\n self.image_dir = path.join(self.workspace, 'images')\n self.mask_dir = path.join(self.workspace, 'masks')\n self.visualization_dir = path.join(self.workspace, 'visualization')\n self.soft_mask_dir = path.join(self.workspace, 'soft_masks')\n os.makedirs(self.image_dir, exist_ok=True)\n os.makedirs(self.mask_dir, exist_ok=True)\n os.makedirs(self.visualization_dir, exist_ok=True)\n os.makedirs(self.soft_mask_dir, exist_ok=True)\n\n # create all soft mask sub-directories\n for i in range(1, cfg['num_objects'] + 1):\n os.makedirs(path.join(self.soft_mask_dir, f'{i}'), exist_ok=True)\n\n # convert read functions to be buffered\n self.get_image = LRU(self._get_image_unbuffered, maxsize=cfg['buffer_size'])\n self.get_mask = LRU(self._get_mask_unbuffered, maxsize=cfg['buffer_size'])\n\n # extract frames from video\n if need_decoding:\n self._extract_frames(video)\n\n # copy/resize existing images to the workspace\n if need_resizing:\n self._copy_resize_frames(images)\n\n # read all frame names\n self.names = sorted(os.listdir(self.image_dir))\n self.names = [f[:-4] for f in self.names] # remove extensions\n self.length = len(self.names)\n\n assert self.length > 0, f'No images found! Check {self.workspace}/images. 
Remove folder if necessary.'\n\n print(f'{self.length} images found.')\n\n self.height, self.width = self.get_image(0).shape[:2]\n\n # create the saver threads for saving the masks/visualizations\n self.save_queue = Queue(maxsize=cfg['save_queue_size'])\n self.num_save_threads = cfg['num_save_threads']\n self.save_threads = [\n Thread(target=self.save_thread, args=(self.save_queue, ))\n for _ in range(self.num_save_threads)\n ]\n for t in self.save_threads:\n t.daemon = True\n t.start()\n\n def __del__(self):\n for _ in range(self.num_save_threads):\n self.save_queue.put(None)\n self.save_queue.join()\n for t in self.save_threads:\n t.join()\n\n def save_thread(self, queue: Queue):\n while True:\n args: SaveItem = queue.get()\n if args is None:\n queue.task_done()\n break\n if args.type == 'mask':\n # PIL image\n args.data.save(path.join(self.mask_dir, args.name + '.png'))\n elif args.type.startswith('visualization'):\n # numpy array, save with cv2\n vis_mode = args.type.split('_')[-1]\n data = cv2.cvtColor(args.data, cv2.COLOR_RGB2BGR)\n os.makedirs(path.join(self.visualization_dir, vis_mode), exist_ok=True)\n cv2.imwrite(path.join(self.visualization_dir, vis_mode, args.name + '.jpg'), data)\n elif args.type == 'soft_mask':\n # numpy array, save each channel with cv2\n num_channels = args.data.shape[0]\n # first channel is background -- ignore\n for i in range(1, num_channels):\n data = args.data[i]\n data = (data * 255).astype(np.uint8)\n cv2.imwrite(path.join(self.soft_mask_dir, f'{i}', args.name + '.png'), data)\n else:\n raise NotImplementedError\n queue.task_done()\n\n def _extract_frames(self, video: str):\n cap = cv2.VideoCapture(video)\n frame_index = 0\n print(f'Extracting frames from {video} into {self.image_dir}...')\n with tqdm() as bar:\n while (cap.isOpened()):\n _, frame = cap.read()\n if frame is None:\n break\n h, w = frame.shape[:2]\n if self.max_size > 0 and min(h, w) > self.max_size:\n new_w = (w * self.max_size // min(w, h))\n new_h = (h * self.max_size // min(w, h))\n frame = cv2.resize(frame, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)\n cv2.imwrite(path.join(self.image_dir, f'{frame_index:07d}.jpg'), frame)\n frame_index += 1\n bar.update()\n print('Done!')\n\n def _copy_resize_frames(self, images: str):\n image_list = os.listdir(images)\n print(f'Copying/resizing frames into {self.image_dir}...')\n for image_name in tqdm(image_list):\n if self.max_size < 0:\n # just copy\n shutil.copy2(path.join(images, image_name), self.image_dir)\n else:\n frame = cv2.imread(path.join(images, image_name))\n h, w = frame.shape[:2]\n if self.max_size > 0 and min(h, w) > self.max_size:\n new_w = (w * self.max_size // min(w, h))\n new_h = (h * self.max_size // min(w, h))\n frame = cv2.resize(frame, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)\n cv2.imwrite(path.join(self.image_dir, image_name), frame)\n print('Done!')\n\n def add_to_queue_with_warning(self, item: SaveItem):\n if self.save_queue.full():\n print(\n 'The save queue is full! You need more threads or faster IO. 
Program might pause.')\n self.save_queue.put(item)\n\n def save_mask(self, ti: int, mask: np.ndarray):\n # mask should be uint8 H*W without channels\n assert 0 <= ti < self.length\n assert isinstance(mask, np.ndarray)\n\n mask = Image.fromarray(mask)\n mask.putpalette(self.palette)\n self.invalidate(ti)\n self.add_to_queue_with_warning(SaveItem('mask', mask, self.names[ti]))\n\n def save_visualization(self, ti: int, vis_mode: str, image: np.ndarray):\n # image should be uint8 3*H*W\n assert 0 <= ti < self.length\n assert isinstance(image, np.ndarray)\n\n self.add_to_queue_with_warning(SaveItem(f'visualization_{vis_mode}', image, self.names[ti]))\n\n def save_soft_mask(self, ti: int, prob: np.ndarray):\n # mask should be float (num_objects+1)*H*W np array\n assert 0 <= ti < self.length\n assert isinstance(prob, np.ndarray)\n\n self.add_to_queue_with_warning(SaveItem('soft_mask', prob, self.names[ti]))\n\n def _get_image_unbuffered(self, ti: int):\n # returns H*W*3 uint8 array\n assert 0 <= ti < self.length\n\n image = Image.open(path.join(self.image_dir, self.names[ti] + '.jpg')).convert('RGB')\n image = np.array(image)\n return image\n\n def _get_mask_unbuffered(self, ti: int):\n # returns H*W uint8 array\n assert 0 <= ti < self.length\n\n mask_path = path.join(self.mask_dir, self.names[ti] + '.png')\n if path.exists(mask_path):\n mask = Image.open(mask_path)\n mask = np.array(mask)\n return mask\n else:\n return None\n\n def import_mask(self, file_name: str, size: Optional[Tuple[int, int]] = None):\n # read an mask file and resize it to exactly match the canvas size\n image = Image.open(file_name)\n if size is not None:\n # PIL uses (width, height)\n image = image.resize((size[1], size[0]), resample=Image.Resampling.NEAREST)\n image = np.array(image)\n return image\n\n def import_layer(self, file_name: str, size: Tuple[int, int]):\n # read a RGBA/RGB file and resize it such that the entire layer is visible in the canvas\n # and then pad it to the canvas size (h, w)\n image = Image.open(file_name).convert('RGBA')\n im_w, im_h = image.size\n im_ratio = im_w / im_h\n canvas_ratio = size[1] / size[0]\n if im_ratio < canvas_ratio:\n # fit height\n new_h = size[0]\n new_w = int(new_h * im_ratio)\n else:\n # fit width\n new_w = size[1]\n new_h = int(new_w / im_ratio)\n image = image.resize((new_w, new_h), resample=Image.Resampling.BILINEAR)\n image = np.array(image)\n # padding\n pad_h = (size[0] - new_h) // 2\n pad_w = (size[1] - new_w) // 2\n image = np.pad(image,\n ((pad_h, size[0] - new_h - pad_h), (pad_w, size[1] - new_w - pad_w), (0, 0)),\n mode='constant',\n constant_values=0)\n\n return image\n\n def invalidate(self, ti: int):\n # the image buffer is never invalidated\n self.get_mask.invalidate((ti, ))\n\n def __len__(self):\n return self.length\n\n @property\n def T(self) -> int:\n return self.length\n\n @property\n def h(self) -> int:\n return self.height\n\n @property\n def w(self) -> int:\n return self.width" }, { "identifier": "GUI", "path": "gui/gui.py", "snippet": "class GUI(QWidget):\n def __init__(self, controller, cfg: DictConfig) -> None:\n super().__init__()\n\n # callbacks to be set by the controller\n self.on_mouse_motion_xy = None\n self.click_fn = None\n\n self.controller = controller\n self.cfg = cfg\n self.h = controller.h\n self.w = controller.w\n self.T = controller.T\n\n # set up the window\n self.setWindowTitle(f'Cutie demo: {cfg[\"workspace\"]}')\n self.setGeometry(100, 100, self.w + 200, self.h + 200)\n self.setWindowIcon(QIcon('docs/icon.png'))\n\n # set up some 
buttons\n self.play_button = QPushButton('Play video')\n self.play_button.clicked.connect(self.on_play_video)\n self.commit_button = QPushButton('Commit to permanent memory')\n self.commit_button.clicked.connect(controller.on_commit)\n self.export_video_button = QPushButton('Export as video')\n self.export_video_button.clicked.connect(controller.on_export_visualization)\n self.export_binary_button = QPushButton('Export binary masks')\n self.export_binary_button.clicked.connect(controller.on_export_binary)\n\n self.forward_run_button = QPushButton('Propagate forward')\n self.forward_run_button.clicked.connect(controller.on_forward_propagation)\n self.forward_run_button.setMinimumWidth(150)\n\n self.backward_run_button = QPushButton('Propagate backward')\n self.backward_run_button.clicked.connect(controller.on_backward_propagation)\n self.backward_run_button.setMinimumWidth(150)\n\n # universal progressbar\n self.progressbar = QProgressBar()\n self.progressbar.setMinimum(0)\n self.progressbar.setMaximum(100)\n self.progressbar.setValue(0)\n self.progressbar.setMinimumWidth(200)\n\n self.reset_frame_button = QPushButton('Reset frame')\n self.reset_frame_button.clicked.connect(controller.on_reset_mask)\n self.reset_object_button = QPushButton('Reset object')\n self.reset_object_button.clicked.connect(controller.on_reset_object)\n\n # set up the LCD\n self.lcd = QTextEdit()\n self.lcd.setReadOnly(True)\n self.lcd.setMaximumHeight(28)\n self.lcd.setMaximumWidth(150)\n self.lcd.setText('{: 5d} / {: 5d}'.format(0, controller.T - 1))\n\n # current object id\n self.object_dial = QSpinBox()\n self.object_dial.setReadOnly(False)\n self.object_dial.setMinimumSize(50, 30)\n self.object_dial.setMinimum(1)\n self.object_dial.setMaximum(controller.num_objects)\n self.object_dial.editingFinished.connect(controller.on_object_dial_change)\n\n self.object_color = QLabel()\n self.object_color.setMinimumSize(100, 30)\n self.object_color.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.frame_name = QLabel()\n self.frame_name.setMinimumSize(100, 30)\n self.frame_name.setAlignment(Qt.AlignmentFlag.AlignLeft)\n\n # timeline slider\n self.tl_slider = QSlider(Qt.Orientation.Horizontal)\n self.tl_slider.valueChanged.connect(controller.on_slider_update)\n self.tl_slider.setMinimum(0)\n self.tl_slider.setMaximum(controller.T - 1)\n self.tl_slider.setValue(0)\n self.tl_slider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.tl_slider.setTickInterval(1)\n\n # combobox\n self.combo = QComboBox(self)\n self.combo.addItem(\"mask\")\n self.combo.addItem(\"davis\")\n self.combo.addItem(\"fade\")\n self.combo.addItem(\"light\")\n self.combo.addItem(\"popup\")\n self.combo.addItem(\"layer\")\n self.combo.setCurrentText('davis')\n self.combo.currentTextChanged.connect(controller.set_vis_mode)\n\n self.save_visualization_checkbox = QCheckBox(self)\n self.save_visualization_checkbox.toggled.connect(controller.on_save_visualization_toggle)\n self.save_visualization_checkbox.setChecked(False)\n\n self.save_soft_mask_checkbox = QCheckBox(self)\n self.save_soft_mask_checkbox.toggled.connect(controller.on_save_soft_mask_toggle)\n self.save_soft_mask_checkbox.setChecked(False)\n\n # controls for output FPS and bitrate\n self.fps_dial = QSpinBox()\n self.fps_dial.setReadOnly(False)\n self.fps_dial.setMinimumSize(40, 30)\n self.fps_dial.setMinimum(1)\n self.fps_dial.setMaximum(60)\n self.fps_dial.setValue(cfg['output_fps'])\n self.fps_dial.editingFinished.connect(controller.on_fps_dial_change)\n\n self.bitrate_dial = QSpinBox()\n 
self.bitrate_dial.setReadOnly(False)\n self.bitrate_dial.setMinimumSize(40, 30)\n self.bitrate_dial.setMinimum(1)\n self.bitrate_dial.setMaximum(100)\n self.bitrate_dial.setValue(cfg['output_bitrate'])\n self.bitrate_dial.editingFinished.connect(controller.on_bitrate_dial_change)\n\n # Main canvas -> QLabel\n self.main_canvas = QLabel()\n self.main_canvas.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)\n self.main_canvas.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.main_canvas.setMinimumSize(100, 100)\n\n self.main_canvas.mousePressEvent = self.on_mouse_press\n self.main_canvas.mouseMoveEvent = self.on_mouse_motion\n self.main_canvas.setMouseTracking(True) # Required for all-time tracking\n self.main_canvas.mouseReleaseEvent = self.on_mouse_release\n\n # clearing memory\n self.clear_all_mem_button = QPushButton('Reset all memory')\n self.clear_all_mem_button.clicked.connect(controller.on_clear_memory)\n self.clear_non_perm_mem_button = QPushButton('Reset non-permanent memory')\n self.clear_non_perm_mem_button.clicked.connect(controller.on_clear_non_permanent_memory)\n\n # displaying memory usage\n self.perm_mem_gauge, self.perm_mem_gauge_layout = create_gauge('Permanent memory size')\n self.work_mem_gauge, self.work_mem_gauge_layout = create_gauge('Working memory size')\n self.long_mem_gauge, self.long_mem_gauge_layout = create_gauge('Long-term memory size')\n self.gpu_mem_gauge, self.gpu_mem_gauge_layout = create_gauge(\n 'GPU mem. (all proc, w/ caching)')\n self.torch_mem_gauge, self.torch_mem_gauge_layout = create_gauge(\n 'GPU mem. (torch, w/o caching)')\n\n # Parameters setting\n self.work_mem_min, self.work_mem_min_layout = create_parameter_box(\n 1, 100, 'Min. working memory frames', callback=controller.on_work_min_change)\n self.work_mem_max, self.work_mem_max_layout = create_parameter_box(\n 2, 100, 'Max. working memory frames', callback=controller.on_work_max_change)\n self.long_mem_max, self.long_mem_max_layout = create_parameter_box(\n 1000,\n 100000,\n 'Max. 
long-term memory size',\n step=1000,\n callback=controller.update_config)\n self.mem_every_box, self.mem_every_box_layout = create_parameter_box(\n 1, 100, 'Memory frame every (r)', callback=controller.update_config)\n\n # import mask/layer\n self.import_mask_button = QPushButton('Import mask')\n self.import_mask_button.clicked.connect(controller.on_import_mask)\n self.import_layer_button = QPushButton('Import layer')\n self.import_layer_button.clicked.connect(controller.on_import_layer)\n\n # Console on the GUI\n self.console = QPlainTextEdit()\n self.console.setReadOnly(True)\n self.console.setMinimumHeight(100)\n self.console.setMaximumHeight(100)\n\n # Tips for the users\n self.tips = QTextEdit()\n self.tips.setReadOnly(True)\n self.tips.setTextInteractionFlags(Qt.NoTextInteraction)\n self.tips.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)\n with open('./gui/TIPS.md') as f:\n self.tips.setMarkdown(f.read())\n\n # navigator\n navi = QHBoxLayout()\n\n interact_subbox = QVBoxLayout()\n interact_topbox = QHBoxLayout()\n interact_botbox = QHBoxLayout()\n interact_topbox.setAlignment(Qt.AlignmentFlag.AlignCenter)\n interact_topbox.addWidget(self.lcd)\n interact_topbox.addWidget(self.play_button)\n interact_topbox.addWidget(self.reset_frame_button)\n interact_topbox.addWidget(self.reset_object_button)\n interact_botbox.addWidget(QLabel('Current object ID:'))\n interact_botbox.addWidget(self.object_dial)\n interact_botbox.addWidget(self.object_color)\n interact_botbox.addWidget(self.frame_name)\n interact_subbox.addLayout(interact_topbox)\n interact_subbox.addLayout(interact_botbox)\n interact_botbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n navi.addLayout(interact_subbox)\n\n apply_fixed_size_policy = lambda x: x.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.\n Policy.Fixed)\n apply_to_all_children_widget(interact_topbox, apply_fixed_size_policy)\n apply_to_all_children_widget(interact_botbox, apply_fixed_size_policy)\n\n navi.addStretch(1)\n navi.addStretch(1)\n overlay_subbox = QVBoxLayout()\n overlay_topbox = QHBoxLayout()\n overlay_botbox = QHBoxLayout()\n overlay_topbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n overlay_botbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n overlay_topbox.addWidget(QLabel('Overlay mode'))\n overlay_topbox.addWidget(self.combo)\n overlay_topbox.addWidget(QLabel('Save soft mask during propagation'))\n overlay_topbox.addWidget(self.save_soft_mask_checkbox)\n overlay_topbox.addWidget(self.export_binary_button)\n overlay_botbox.addWidget(QLabel('Save overlay'))\n overlay_botbox.addWidget(self.save_visualization_checkbox)\n overlay_botbox.addWidget(self.export_video_button)\n overlay_botbox.addWidget(QLabel('Output FPS: '))\n overlay_botbox.addWidget(self.fps_dial)\n overlay_botbox.addWidget(QLabel('Output bitrate (Mbps): '))\n overlay_botbox.addWidget(self.bitrate_dial)\n overlay_subbox.addLayout(overlay_topbox)\n overlay_subbox.addLayout(overlay_botbox)\n navi.addLayout(overlay_subbox)\n apply_to_all_children_widget(overlay_topbox, apply_fixed_size_policy)\n apply_to_all_children_widget(overlay_botbox, apply_fixed_size_policy)\n\n navi.addStretch(1)\n control_subbox = QVBoxLayout()\n control_topbox = QHBoxLayout()\n control_botbox = QHBoxLayout()\n control_topbox.addWidget(self.commit_button)\n control_topbox.addWidget(self.forward_run_button)\n control_topbox.addWidget(self.backward_run_button)\n control_botbox.addWidget(self.progressbar)\n control_subbox.addLayout(control_topbox)\n control_subbox.addLayout(control_botbox)\n 
navi.addLayout(control_subbox)\n\n # Drawing area main canvas\n draw_area = QHBoxLayout()\n draw_area.addWidget(self.main_canvas, 4)\n\n # right area\n right_area = QVBoxLayout()\n right_area.setAlignment(Qt.AlignmentFlag.AlignBottom)\n right_area.addWidget(self.tips)\n # right_area.addStretch(1)\n\n # Parameters\n right_area.addLayout(self.perm_mem_gauge_layout)\n right_area.addLayout(self.work_mem_gauge_layout)\n right_area.addLayout(self.long_mem_gauge_layout)\n right_area.addLayout(self.gpu_mem_gauge_layout)\n right_area.addLayout(self.torch_mem_gauge_layout)\n right_area.addWidget(self.clear_all_mem_button)\n right_area.addWidget(self.clear_non_perm_mem_button)\n right_area.addLayout(self.work_mem_min_layout)\n right_area.addLayout(self.work_mem_max_layout)\n right_area.addLayout(self.long_mem_max_layout)\n right_area.addLayout(self.mem_every_box_layout)\n\n # import mask/layer\n import_area = QHBoxLayout()\n import_area.setAlignment(Qt.AlignmentFlag.AlignBottom)\n import_area.addWidget(self.import_mask_button)\n import_area.addWidget(self.import_layer_button)\n right_area.addLayout(import_area)\n\n # console\n right_area.addWidget(self.console)\n\n draw_area.addLayout(right_area, 1)\n\n layout = QVBoxLayout()\n layout.addLayout(draw_area)\n layout.addWidget(self.tl_slider)\n layout.addLayout(navi)\n self.setLayout(layout)\n\n # timer to play video\n self.timer = QTimer()\n self.timer.setSingleShot(False)\n self.timer.timeout.connect(controller.on_play_video_timer)\n\n # timer to update GPU usage\n self.gpu_timer = QTimer()\n self.gpu_timer.setSingleShot(False)\n self.gpu_timer.timeout.connect(controller.on_gpu_timer)\n self.gpu_timer.setInterval(2000)\n self.gpu_timer.start()\n\n # Objects shortcuts\n for i in range(1, controller.num_objects + 1):\n QShortcut(QKeySequence(str(i)),\n self).activated.connect(functools.partial(controller.hit_number_key, i))\n QShortcut(QKeySequence(f\"Ctrl+{i}\"),\n self).activated.connect(functools.partial(controller.hit_number_key, i))\n\n # <- and -> shortcuts\n QShortcut(QKeySequence(Qt.Key.Key_Left), self).activated.connect(controller.on_prev_frame)\n QShortcut(QKeySequence(Qt.Key.Key_Right), self).activated.connect(controller.on_next_frame)\n\n def resizeEvent(self, event):\n self.controller.show_current_frame()\n\n def text(self, text):\n self.console.moveCursor(QTextCursor.MoveOperation.End)\n self.console.insertPlainText(text + '\\n')\n\n def set_canvas(self, image):\n height, width, channel = image.shape\n bytesPerLine = 3 * width\n\n qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format.Format_RGB888)\n self.main_canvas.setPixmap(\n QPixmap(\n qImg.scaled(self.main_canvas.size(), Qt.AspectRatioMode.KeepAspectRatio,\n Qt.TransformationMode.FastTransformation)))\n\n self.main_canvas_size = self.main_canvas.size()\n self.image_size = qImg.size()\n\n def update_slider(self, value):\n self.lcd.setText('{: 3d} / {: 3d}'.format(value, self.controller.T - 1))\n self.tl_slider.setValue(value)\n\n def pixel_pos_to_image_pos(self, x, y):\n # Un-scale and un-pad the label coordinates into image coordinates\n oh, ow = self.image_size.height(), self.image_size.width()\n nh, nw = self.main_canvas_size.height(), self.main_canvas_size.width()\n\n h_ratio = nh / oh\n w_ratio = nw / ow\n dominate_ratio = min(h_ratio, w_ratio)\n\n # Solve scale\n x /= dominate_ratio\n y /= dominate_ratio\n\n # Solve padding\n fh, fw = nh / dominate_ratio, nw / dominate_ratio\n x -= (fw - ow) / 2\n y -= (fh - oh) / 2\n\n return x, y\n\n def 
is_pos_out_of_bound(self, x, y):\n x, y = self.pixel_pos_to_image_pos(x, y)\n\n out_of_bound = ((x < 0) or (y < 0) or (x > self.w - 1) or (y > self.h - 1))\n\n return out_of_bound\n\n def get_scaled_pos(self, x, y):\n x, y = self.pixel_pos_to_image_pos(x, y)\n\n x = max(0, min(self.w - 1, x))\n y = max(0, min(self.h - 1, y))\n\n return x, y\n\n def forward_propagation_start(self):\n self.backward_run_button.setEnabled(False)\n self.forward_run_button.setText('Pause propagation')\n\n def backward_propagation_start(self):\n self.forward_run_button.setEnabled(False)\n self.backward_run_button.setText('Pause propagation')\n\n def pause_propagation(self):\n self.forward_run_button.setEnabled(True)\n self.backward_run_button.setEnabled(True)\n self.clear_all_mem_button.setEnabled(True)\n self.clear_non_perm_mem_button.setEnabled(True)\n self.forward_run_button.setText('Propagate forward')\n self.backward_run_button.setText('propagate backward')\n self.tl_slider.setEnabled(True)\n\n def process_events(self):\n QApplication.processEvents()\n\n def on_mouse_press(self, event):\n if self.is_pos_out_of_bound(event.position().x(), event.position().y()):\n return\n\n ex, ey = self.get_scaled_pos(event.position().x(), event.position().y())\n if event.button() == Qt.MouseButton.LeftButton:\n action = 'left'\n elif event.button() == Qt.MouseButton.RightButton:\n action = 'right'\n elif event.button() == Qt.MouseButton.MiddleButton:\n action = 'middle'\n\n self.click_fn(action, ex, ey)\n\n def on_mouse_motion(self, event):\n ex, ey = self.get_scaled_pos(event.position().x(), event.position().y())\n self.on_mouse_motion_xy(ex, ey)\n\n def on_mouse_release(self, event):\n pass\n\n def on_play_video(self):\n if self.timer.isActive():\n self.timer.stop()\n self.play_button.setText('Play video')\n else:\n self.timer.start(1000 // 30)\n self.play_button.setText('Stop video')\n\n def open_file(self, prompt):\n options = QFileDialog.Options()\n file_name, _ = QFileDialog.getOpenFileName(self,\n prompt,\n \"\",\n \"Image files (*)\",\n options=options)\n return file_name\n\n def set_object_color(self, object_id: int):\n r, g, b = davis_palette_np[object_id]\n rgb = f'rgb({r},{g},{b})'\n self.object_color.setStyleSheet('QLabel {background: ' + rgb + ';}')\n self.object_color.setText(f'{object_id}')\n\n def progressbar_update(self, progress: float):\n self.progressbar.setValue(int(progress * 100))\n self.process_events()" }, { "identifier": "ClickController", "path": "gui/click_controller.py", "snippet": "class ClickController:\n def __init__(self, checkpoint_path: str, device: str = 'cuda', max_size: int = 800):\n model = utils.load_is_model(checkpoint_path, device, cpu_dist_maps=True)\n\n # Predictor params\n zoomin_params = {\n 'skip_clicks': 1,\n 'target_size': 480,\n 'expansion_ratio': 1.4,\n }\n\n predictor_params = {\n 'brs_mode': 'f-BRS-B',\n # 'brs_mode': 'NoBRS',\n 'prob_thresh': 0.5,\n 'zoom_in_params': zoomin_params,\n 'predictor_params': {\n 'net_clicks_limit': 8,\n 'max_size': max_size,\n },\n 'brs_opt_func_params': {\n 'min_iou_diff': 1e-3\n },\n 'lbfgs_params': {\n 'maxfun': 20\n },\n 'with_flip': True,\n }\n\n self.controller = InteractiveController(model, device, predictor_params)\n self.anchored = False\n self.device = device\n\n def unanchor(self):\n self.anchored = False\n\n def interact(self, image: torch.Tensor, x: int, y: int, is_positive: bool,\n prev_mask: torch.Tensor):\n if not self.anchored:\n image = image.to(self.device, non_blocking=True)\n self.controller.set_image(image)\n 
self.controller.reset_predictor()\n self.anchored = True\n\n self.controller.add_click(x, y, is_positive, prev_mask=prev_mask)\n # return self.controller.result_mask\n return self.controller.probs_history[-1][1]\n # return (self.controller.probs_history[-1][1] > 0.5).float()\n\n def undo(self):\n self.controller.undo_click()\n if len(self.controller.probs_history) == 0:\n return None\n else:\n return (self.controller.probs_history[-1][1] > 0.5).float()" }, { "identifier": "PropagationReader", "path": "gui/reader.py", "snippet": "class PropagationReader(Dataset):\n def __init__(self, res_man: ResourceManager, start_ti: int, direction: Literal['forward',\n 'backward']):\n self.res_man = res_man\n self.start_ti = start_ti\n self.direction = direction\n\n # skip the first frame\n if self.direction == 'forward':\n self.start_ti += 1\n self.length = self.res_man.T - self.start_ti\n elif self.direction == 'backward':\n self.start_ti -= 1\n self.length = self.start_ti + 1\n else:\n raise NotImplementedError\n\n self.to_tensor = ToTensor()\n\n def __getitem__(self, index: int):\n if self.direction == 'forward':\n ti = self.start_ti + index\n elif self.direction == 'backward':\n ti = self.start_ti - index\n else:\n raise NotImplementedError\n\n assert 0 <= ti < self.res_man.T\n\n image = self.res_man.get_image(ti)\n image_torch = self.to_tensor(image)\n\n return image, image_torch\n\n def __len__(self):\n return self.length" }, { "identifier": "get_data_loader", "path": "gui/reader.py", "snippet": "def get_data_loader(dataset: Dataset, num_workers: int):\n if 'linux' in sys.platform:\n loader = DataLoader(dataset,\n batch_size=None,\n shuffle=False,\n num_workers=num_workers,\n collate_fn=lambda x: x)\n else:\n print(f'Non-linux platform {sys.platform} detected, using single-threaded dataloader')\n loader = DataLoader(dataset,\n batch_size=None,\n shuffle=False,\n num_workers=0,\n collate_fn=lambda x: x)\n return loader" }, { "identifier": "convert_frames_to_video", "path": "gui/exporter.py", "snippet": "def convert_frames_to_video(\n image_folder: str,\n output_path: str,\n fps: int = 24,\n bitrate: int = 1, # in Mbps\n progress_callback=None) -> None:\n images = [img for img in sorted(os.listdir(image_folder)) if img.endswith(\".jpg\")]\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n height, width, layers = frame.shape\n\n output = av.open(output_path, mode=\"w\")\n\n stream = output.add_stream(\"h264\", rate=fps)\n stream.width = width\n stream.height = height\n stream.pix_fmt = \"yuv420p\"\n stream.bit_rate = bitrate * (10**7)\n\n for i, img_path in enumerate(images):\n img = cv2.imread(os.path.join(image_folder, img_path))\n frame = av.VideoFrame.from_ndarray(img, format='bgr24')\n packet = stream.encode(frame)\n output.mux(packet)\n\n if progress_callback is not None and i % 10 == 0:\n progress_callback(i / len(images))\n\n # flush\n packet = stream.encode(None)\n output.mux(packet)\n\n output.close()" }, { "identifier": "convert_mask_to_binary", "path": "gui/exporter.py", "snippet": "def convert_mask_to_binary(mask_folder: str,\n output_path: str,\n target_objects: List[int],\n progress_callback=None) -> None:\n masks = [img for img in sorted(os.listdir(mask_folder)) if img.endswith(\".png\")]\n\n for i, mask_path in enumerate(masks):\n mask = Image.open(os.path.join(mask_folder, mask_path))\n mask = np.array(mask)\n mask = np.where(np.isin(mask, target_objects), 255, 0)\n cv2.imwrite(os.path.join(output_path, mask_path), mask)\n\n if progress_callback is not None and i % 10 == 
0:\n progress_callback(i / len(masks))" }, { "identifier": "download_models_if_needed", "path": "scripts/download_models.py", "snippet": "def download_models_if_needed():\n os.makedirs('weights', exist_ok=True)\n for link, md5 in _links:\n # download file if not exists with a progressbar\n filename = link.split('/')[-1]\n if not os.path.exists(os.path.join('weights', filename)) or hashlib.md5(open(os.path.join('weights', filename), 'rb').read()).hexdigest() != md5:\n print(f'Downloading {filename}...')\n r = requests.get(link, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n block_size = 1024\n t = tqdm(total=total_size, unit='iB', unit_scale=True)\n with open(os.path.join('weights', filename), 'wb') as f:\n for data in r.iter_content(block_size):\n t.update(len(data))\n f.write(data)\n t.close()\n if total_size != 0 and t.n != total_size:\n raise RuntimeError('Error while downloading %s' % filename)" } ]
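Taken together, the context snippets above define the pieces of a propagation pass: PropagationReader and get_data_loader stream frames, while InferenceCore.step first memorizes a user-provided mask and then segments subsequent frames from memory. A minimal sketch of that loop under assumed conventions (an index-format mask exists at start_ti, object ids are consecutive from 1, id 0 is background); the real MainController additionally handles GUI updates, pausing, and visualization saving:

import numpy as np
import torch
from gui.reader import PropagationReader, get_data_loader  # from the snippets above

@torch.inference_mode()
def propagate_forward_sketch(processor, res_man, start_ti: int, device: str = 'cuda'):
    # Seed memory with the starting frame's index-format mask.
    image = torch.from_numpy(res_man.get_image(start_ti)).permute(2, 0, 1).float().div_(255).to(device)
    mask = torch.from_numpy(res_man.get_mask(start_ti).astype(np.int64)).to(device)
    objects = [int(o) for o in np.unique(mask.cpu().numpy()) if o != 0]  # 0 = background
    processor.step(image, mask, objects, idx_mask=True)

    # Segment the remaining frames from memory and save the argmax masks.
    loader = get_data_loader(PropagationReader(res_man, start_ti, 'forward'), num_workers=4)
    for ti, (image_np, image_torch) in enumerate(loader, start=start_ti + 1):
        prob = processor.step(image_torch.to(device))  # (num_objects+1, H, W), channel 0 = bg
        res_man.save_mask(ti, torch.argmax(prob, dim=0).cpu().numpy().astype(np.uint8))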
import os import logging import cv2 import torch import numpy as np from os import path from typing import Literal from torch import mps from torch import autocast from torchvision.transforms.functional import to_tensor from omegaconf import DictConfig, open_dict from cutie.model.cutie import CUTIE from cutie.inference.inference_core import InferenceCore from gui.interaction import * from gui.interactive_utils import * from gui.resource_manager import ResourceManager from gui.gui import GUI from gui.click_controller import ClickController from gui.reader import PropagationReader, get_data_loader from gui.exporter import convert_frames_to_video, convert_mask_to_binary from scripts.download_models import download_models_if_needed
15,849
# fix conflicts between qt5 and cv2 os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH") try: except: print('torch.MPS not available.') log = logging.getLogger() class MainController(): def __init__(self, cfg: DictConfig) -> None: super().__init__() self.initialized = False # setting up the workspace if cfg["workspace"] is None: if cfg["images"] is not None: basename = path.basename(cfg["images"]) elif cfg["video"] is not None: basename = path.basename(cfg["video"])[:-4] else: raise NotImplementedError('Either images, video, or workspace has to be specified') cfg["workspace"] = path.join(cfg['workspace_root'], basename) # reading arguments self.cfg = cfg self.num_objects = cfg['num_objects'] self.device = cfg['device'] self.amp = cfg['amp'] # initializing the network(s) self.initialize_networks() # main components self.res_man = ResourceManager(cfg) self.processor = InferenceCore(self.cutie, self.cfg) self.gui = GUI(self, self.cfg) # initialize control info self.length: int = self.res_man.length self.interaction: Interaction = None self.interaction_type: str = 'Click' self.curr_ti: int = 0 self.curr_object: int = 1 self.propagating: bool = False self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none' self.last_ex = self.last_ey = 0 # current frame info self.curr_frame_dirty: bool = False self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8) self.curr_image_torch: torch.Tensor = None self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8) self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w), dtype=torch.float).to(self.device) self.curr_prob[0] = 1 # visualization info self.vis_mode: str = 'davis' self.vis_image: np.ndarray = None self.save_visualization: bool = False self.save_soft_mask: bool = False self.interacted_prob: torch.Tensor = None self.overlay_layer: np.ndarray = None self.overlay_layer_torch: torch.Tensor = None # the object id used for popup/layer overlay self.vis_target_objects = list(range(1, self.num_objects + 1)) self.load_current_image_mask() self.show_current_frame() # initialize stuff self.update_memory_gauges() self.update_gpu_gauges() self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames) self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames) self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens) self.gui.mem_every_box.setValue(self.processor.mem_every) # for exporting videos self.output_fps = cfg['output_fps'] self.output_bitrate = cfg['output_bitrate'] # set callbacks self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy self.gui.click_fn = self.click_fn self.gui.show() self.gui.text('Initialized.') self.initialized = True # try to load the default overlay self._try_load_layer('./docs/uiuc.png') self.gui.set_object_color(self.curr_object) self.update_config() def initialize_networks(self) -> None: download_models_if_needed() self.cutie = CUTIE(self.cfg).eval().to(self.device) model_weights = torch.load(self.cfg.weights, map_location=self.device) self.cutie.load_weights(model_weights)
# fix conflicts between qt5 and cv2
os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH")

try:
    # only present in torch builds with Metal (MPS) support
    from torch import mps
except:
    print('torch.MPS not available.')

log = logging.getLogger()


class MainController():

    def __init__(self, cfg: DictConfig) -> None:
        super().__init__()

        self.initialized = False

        # setting up the workspace
        if cfg["workspace"] is None:
            if cfg["images"] is not None:
                basename = path.basename(cfg["images"])
            elif cfg["video"] is not None:
                basename = path.basename(cfg["video"])[:-4]
            else:
                raise NotImplementedError('Either images, video, or workspace has to be specified')
            cfg["workspace"] = path.join(cfg['workspace_root'], basename)

        # reading arguments
        self.cfg = cfg
        self.num_objects = cfg['num_objects']
        self.device = cfg['device']
        self.amp = cfg['amp']

        # initializing the network(s)
        self.initialize_networks()

        # main components
        self.res_man = ResourceManager(cfg)
        self.processor = InferenceCore(self.cutie, self.cfg)
        self.gui = GUI(self, self.cfg)

        # initialize control info
        self.length: int = self.res_man.length
        self.interaction: Interaction = None
        self.interaction_type: str = 'Click'
        self.curr_ti: int = 0
        self.curr_object: int = 1
        self.propagating: bool = False
        self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none'
        self.last_ex = self.last_ey = 0

        # current frame info
        self.curr_frame_dirty: bool = False
        self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8)
        self.curr_image_torch: torch.Tensor = None
        self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8)
        self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w),
                                                   dtype=torch.float).to(self.device)
        self.curr_prob[0] = 1

        # visualization info
        self.vis_mode: str = 'davis'
        self.vis_image: np.ndarray = None
        self.save_visualization: bool = False
        self.save_soft_mask: bool = False
        self.interacted_prob: torch.Tensor = None
        self.overlay_layer: np.ndarray = None
        self.overlay_layer_torch: torch.Tensor = None
        # the object id used for popup/layer overlay
        self.vis_target_objects = list(range(1, self.num_objects + 1))

        self.load_current_image_mask()
        self.show_current_frame()

        # initialize stuff
        self.update_memory_gauges()
        self.update_gpu_gauges()
        self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames)
        self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames)
        self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens)
        self.gui.mem_every_box.setValue(self.processor.mem_every)

        # for exporting videos
        self.output_fps = cfg['output_fps']
        self.output_bitrate = cfg['output_bitrate']

        # set callbacks
        self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy
        self.gui.click_fn = self.click_fn

        self.gui.show()
        self.gui.text('Initialized.')
        self.initialized = True

        # try to load the default overlay
        self._try_load_layer('./docs/uiuc.png')

        self.gui.set_object_color(self.curr_object)
        self.update_config()

    def initialize_networks(self) -> None:
        download_models_if_needed()
        self.cutie = CUTIE(self.cfg).eval().to(self.device)
        model_weights = torch.load(self.cfg.weights, map_location=self.device)
        self.cutie.load_weights(model_weights)
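The controller above reads a handful of keys from its DictConfig before building the GUI. As a point of reference, here is a minimal sketch of a config that would satisfy those keys; the key names come from the code itself, but the inline OmegaConf construction and all values are illustrative assumptions (the real Cutie configs are assembled by Hydra and contain many more entries):

from omegaconf import OmegaConf

# Hypothetical minimal config: only the keys read by __init__ and
# initialize_networks above. All values are placeholders.
cfg = OmegaConf.create({
    'workspace': None,                  # derived from the video basename when None
    'images': None,
    'video': './examples/example.mp4',  # placeholder input path
    'workspace_root': './workspace',
    'num_objects': 1,
    'device': 'cuda',
    'amp': True,
    'weights': './weights/cutie.pth',   # placeholder checkpoint path
    'output_fps': 24,
    'output_bitrate': None,
})
# controller = MainController(cfg)      # would download weights and open the Qt GUI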
self.click_ctrl = ClickController(self.cfg.ritm_weights, device=self.device)
4
2023-10-19 17:49:24+00:00
24k
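That completes the record: given the import block and the cropped controller code, the held-out target is the line that constructs the ClickController. A minimal scoring sketch for such a record follows; generate_next_line is a hypothetical stand-in for the model under evaluation, and dict-style field access is assumed purely for illustration:

def exact_match(record: dict, generate_next_line) -> bool:
    # Prompt = import block + cropped code; target = the held-out next line.
    prompt = record['import_statement'] + '\n' + record['cropped_code']
    prediction = generate_next_line(prompt)
    # Compare modulo surrounding whitespace.
    return prediction.strip() == record['next_line'].strip()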
ZhengyiLuo/PerpetualHumanoidControl
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\")\n\n def __init__(self, node_names, parent_indices, local_translation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n def __repr__(self):\n return (\"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n ))\n\n def _indent(self, 
s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args, **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args, **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict([\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n ])\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n new_local_translation = torch.zeros(new_length, 3, dtype=self.local_translation.dtype)\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n while tb_node_index != -1 and 
self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[tb_node_index, node_index, :]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[self[tb_node_index]]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... )\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. 
from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., :self.num_joints * 4].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 4)))\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? \n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[..., self.num_joints * 4:self.num_joints * 4 + 3]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n # Forward Kinematics\n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(local_transformation[..., node_index, :])\n else:\n global_transformation.append(transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n ))\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not 
hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(self.global_transformation)\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[..., node_index, :]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(r=self.local_rotation, t=self.local_translation)\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. 
The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (tuple(self.tensor.shape[:-1]) + (len(self.skeleton_tree),) + tuple(self.skeleton_tree.local_translation.shape[-1:]))\n local_translation = self.skeleton_tree.local_translation.broadcast_to(*broadcast_shape).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (self.global_translation - self.root_translation.unsqueeze(-1))\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation)\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (global_positions[:, left_shoulder_index].numpy() - global_positions[:, right_shoulder_index].numpy() + global_positions[:, left_hip_index].numpy() - global_positions[:, right_hip_index].numpy())\n side_direction = (side_direction / np.sqrt((side_direction**2).sum(axis=-1))[..., np.newaxis])\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\")\n forward_direction = (forward_direction / np.sqrt((forward_direction**2).sum(axis=-1))[..., np.newaxis])\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(*(state_shape + (-1,)))\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(cls: Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs) -> \"SkeletonState\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = 
TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict([\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ])\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (r.dim() > 0), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n state_vec = SkeletonState._to_state_vector(r, t)\n\n return cls(\n state_vec,\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. 
\n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (transform_translation(transform_mul(p1, p2)).reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3).mean(axis=0))\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(node_names, pairwise_translation)\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(list(joint_mapping_inv))\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (len(set(n_joints)) == 1), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n ))\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(node_names, pairwise_translation)\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :])\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_state.local_rotation[..., 0, :])\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (source_state.root_translation - source_tpose.root_translation) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state relative to source tpose 
and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[current_index, :] = target_tpose.global_rotation[target_tpose.skeleton_tree.index(name), :]\n\n global_rotation_diff = quat_mul_norm(source_state.global_rotation, quat_inverse(source_tpose.global_rotation))\n new_global_rotation = quat_mul_norm(global_rotation_diff, target_tpose_global_rotation)\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[:, parent_index, :]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps)\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3)))\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3)))\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector,\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int):\n \"\"\"\n Construct a skeleton motion from a skeleton state. 
The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n assert (type(skeleton_state) == SkeletonState), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n global_velocity = SkeletonMotion._compute_velocity(p=skeleton_state.global_translation, time_delta=1 / fps)\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(r=skeleton_state.global_rotation, time_delta=1 / fps)\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(dict_repr[\"global_angular_velocity\"], *args, **kwargs)\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict([\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ])\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps)\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(transformation_matrix=torch.from_numpy(np.swapaxes(np.array(transforms), -1, -2),).float())\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(-1, len(joint_parents), 3)[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, r=local_rotation, t=root_translation, is_local=True)\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(skeleton_state=skeleton_state, fps=fps)\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = np.gradient(p.numpy(), axis=-3) / time_delta\n if guassian_filter:\n velocity = torch.from_numpy(filters.gaussian_filter1d(velocity, 2, axis=-3, mode=\"nearest\")).to(p)\n else:\n velocity = torch.from_numpy(velocity).to(p)\n\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r).to(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :]))\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n if guassian_filter:\n angular_velocity = torch.from_numpy(filters.gaussian_filter1d(angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"),)\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps))\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "plot_skeleton_state", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_state(skeleton_state, task_name=\"\"):\n \"\"\"\n Visualize a skeleton state\n\n :param skeleton_state:\n :param task_name:\n :type skeleton_state: SkeletonState\n :type task_name: string, optional\n \"\"\"\n logger.info(\"plotting {}\".format(task_name))\n task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)\n plotter = Matplotlib3DPlotter(task)\n plotter.show()" }, { "identifier": "plot_skeleton_motion_interactive", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_motion_interactive(skeleton_motion, task_name=\"\"):\n \"\"\"\n Visualize a skeleton motion along its first dimension interactively.\n\n :param skeleton_motion:\n :param task_name:\n :type skeleton_motion: SkeletonMotion\n :type task_name: string, optional\n \"\"\"\n for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):\n pass" }, { "identifier": "Matplotlib3DPlotter", "path": "poselib/poselib/visualization/plt_plotter.py", "snippet": "class Matplotlib3DPlotter(BasePlotter):\n _fig: plt.figure # plt figure\n _ax: p3.Axes3D # plt 3d axis\n # stores artist objects for each task (task name as the key)\n _artist_cache: Dict[str, Any]\n # callables for each task primitives\n 
_create_impl_callables: Dict[str, Callable]\n _update_impl_callables: Dict[str, Callable]\n\n def __init__(self, task: \"BasePlotterTask\") -> None:\n self._fig = plt.figure()\n self._ax = p3.Axes3D(self._fig)\n self._artist_cache = {}\n\n self._create_impl_callables = {\n \"Draw3DLines\": self._lines_create_impl,\n \"Draw3DDots\": self._dots_create_impl,\n \"Draw3DTrail\": self._trail_create_impl,\n }\n self._update_impl_callables = {\n \"Draw3DLines\": self._lines_update_impl,\n \"Draw3DDots\": self._dots_update_impl,\n \"Draw3DTrail\": self._trail_update_impl,\n }\n self._init_lim()\n super().__init__(task)\n\n @property\n def ax(self):\n return self._ax\n\n @property\n def fig(self):\n return self._fig\n\n def show(self):\n plt.show()\n\n def _min(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return min(x, y)\n\n def _max(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return max(x, y)\n\n def _init_lim(self):\n self._curr_x_min = None\n self._curr_y_min = None\n self._curr_z_min = None\n self._curr_x_max = None\n self._curr_y_max = None\n self._curr_z_max = None\n\n def _update_lim(self, xs, ys, zs):\n self._curr_x_min = self._min(np.min(xs), self._curr_x_min)\n self._curr_y_min = self._min(np.min(ys), self._curr_y_min)\n self._curr_z_min = self._min(np.min(zs), self._curr_z_min)\n self._curr_x_max = self._max(np.max(xs), self._curr_x_max)\n self._curr_y_max = self._max(np.max(ys), self._curr_y_max)\n self._curr_z_max = self._max(np.max(zs), self._curr_z_max)\n\n def _set_lim(self):\n if not (\n self._curr_x_min is None\n or self._curr_x_max is None\n or self._curr_y_min is None\n or self._curr_y_max is None\n or self._curr_z_min is None\n or self._curr_z_max is None\n ):\n self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)\n self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)\n self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)\n self._init_lim()\n\n @staticmethod\n def _lines_extract_xyz_impl(index, lines_task):\n return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]\n\n @staticmethod\n def _trail_extract_xyz_impl(index, trail_task):\n return (\n trail_task[index : index + 2, 0],\n trail_task[index : index + 2, 1],\n trail_task[index : index + 2, 2],\n )\n\n def _lines_create_impl(self, lines_task):\n color = lines_task.color\n self._artist_cache[lines_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),\n color=color,\n linewidth=lines_task.line_width,\n alpha=lines_task.alpha\n )[0]\n for i in range(len(lines_task))\n ]\n\n def _lines_update_impl(self, lines_task):\n lines_artists = self._artist_cache[lines_task.task_name]\n for i in range(len(lines_task)):\n artist = lines_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if lines_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _dots_create_impl(self, dots_task):\n color = dots_task.color\n self._artist_cache[dots_task.task_name] = self._ax.plot(\n dots_task[:, 0],\n dots_task[:, 1],\n dots_task[:, 2],\n c=color,\n linestyle=\"\",\n marker=\".\",\n markersize=dots_task.marker_size,\n alpha=dots_task.alpha,\n )[0]\n\n def _dots_update_impl(self, dots_task):\n dots_artist = self._artist_cache[dots_task.task_name]\n dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])\n dots_artist.set_3d_properties(dots_task[:, 2])\n if dots_task.influence_lim:\n self._update_lim(dots_task[:, 0], dots_task[:, 1], 
dots_task[:, 2])\n\n def _trail_create_impl(self, trail_task):\n color = trail_task.color\n trail_length = len(trail_task) - 1\n self._artist_cache[trail_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),\n color=trail_task.color,\n linewidth=trail_task.line_width,\n alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))\n )[0]\n for i in range(trail_length)\n ]\n\n def _trail_update_impl(self, trail_task):\n trails_artists = self._artist_cache[trail_task.task_name]\n for i in range(len(trail_task) - 1):\n artist = trails_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if trail_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _create_impl(self, task_list):\n for task in task_list:\n self._create_impl_callables[task.task_type](task)\n self._draw()\n\n def _update_impl(self, task_list):\n for task in task_list:\n self._update_impl_callables[task.task_type](task)\n self._draw()\n\n def _set_aspect_equal_3d(self):\n xlim = self._ax.get_xlim3d()\n ylim = self._ax.get_ylim3d()\n zlim = self._ax.get_zlim3d()\n\n xmean = np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n\n plot_radius = max(\n [\n abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))\n for lim in lims\n ]\n )\n\n self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n def _draw(self):\n self._set_lim()\n self._set_aspect_equal_3d()\n self._fig.canvas.draw()\n self._fig.canvas.flush_events()\n plt.pause(0.00001)" }, { "identifier": "Draw3DSkeletonMotion", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonMotion(BasePlotterTask):\n def __init__(\n self,\n task_name: str,\n skeleton_motion,\n frame_index=None,\n joints_color=\"red\",\n lines_color=\"blue\",\n velocity_color=\"green\",\n angular_velocity_color=\"purple\",\n trail_color=\"black\",\n trail_length=10,\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonMotion\")\n self._trail_length = trail_length\n self._skeleton_motion = skeleton_motion\n # if frame_index is None:\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]\n # else:\n # curr_skeleton_motion = self._skeleton_motion[frame_index, :]\n self._skeleton_state_task = Draw3DSkeletonState(\n self.get_scoped_name(\"skeleton_state\"),\n curr_skeleton_motion,\n joints_color=joints_color,\n lines_color=lines_color,\n alpha=alpha,\n )\n vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(\n curr_skeleton_motion\n )\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(trail_length, axis=0)\n self._vel_task = Draw3DLines(\n self.get_scoped_name(\"velocity\"),\n vel_lines,\n velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._avel_task = Draw3DLines(\n self.get_scoped_name(\"angular_velocity\"),\n avel_lines,\n angular_velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._com_trail_task = Draw3DTrail(\n self.get_scoped_name(\"com_trail\"),\n self._com_pos,\n trail_color,\n marker_size=2,\n influence_lim=True,\n alpha=alpha,\n )\n\n @property\n def name(self):\n return \"3DSkeletonMotion\"\n\n def update(self, 
frame_index=None, reset_trail=False, skeleton_motion=None) -> None:\n if skeleton_motion is not None:\n self._skeleton_motion = skeleton_motion\n\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]\n if reset_trail:\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(self._trail_length, axis=0)\n else:\n self._com_pos = np.concatenate(\n (\n curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],\n self._com_pos[:-1],\n ),\n axis=0,\n )\n self._skeleton_state_task.update(curr_skeleton_motion)\n self._com_trail_task.update(self._com_pos)\n self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))\n\n @staticmethod\n def _get_vel_and_avel(skeleton_motion):\n \"\"\"Get all the velocity and angular velocity lines\n \"\"\"\n pos = skeleton_motion.global_translation.numpy()\n vel = skeleton_motion.global_velocity.numpy()\n avel = skeleton_motion.global_angular_velocity.numpy()\n\n vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)\n avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)\n return vel_lines, avel_lines\n\n def _update(self, vel_lines, avel_lines) -> None:\n self._vel_task.update(vel_lines)\n self._avel_task.update(avel_lines)\n\n def __iter__(self):\n yield from self._skeleton_state_task\n yield from self._vel_task\n yield from self._avel_task\n yield from self._com_trail_task" }, { "identifier": "Draw3DSkeletonState", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonState(BasePlotterTask):\n _lines_task: Draw3DLines # sub-task for drawing lines\n _dots_task: Draw3DDots # sub-task for drawing dots\n\n def __init__(\n self,\n task_name: str,\n skeleton_state,\n joints_color: str = \"red\",\n lines_color: str = \"blue\",\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonState\")\n lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)\n self._lines_task = Draw3DLines(\n self.get_scoped_name(\"bodies\"), lines, joints_color, alpha=alpha\n )\n self._dots_task = Draw3DDots(\n self.get_scoped_name(\"joints\"), dots, lines_color, alpha=alpha\n )\n\n @property\n def name(self):\n return \"3DSkeleton\"\n\n def update(self, skeleton_state) -> None:\n self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))\n\n @staticmethod\n def _get_lines_and_dots(skeleton_state):\n \"\"\"Get all the lines and dots needed to draw the skeleton state\n \"\"\"\n assert (\n len(skeleton_state.tensor.shape) == 1\n ), \"the state has to be zero dimensional\"\n dots = skeleton_state.global_translation.numpy()\n skeleton_tree = skeleton_state.skeleton_tree\n parent_indices = skeleton_tree.parent_indices.numpy()\n lines = []\n for node_index in range(len(skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index != -1:\n lines.append([dots[node_index], dots[parent_index]])\n lines = np.array(lines)\n return lines, dots\n\n def _update(self, lines, dots) -> None:\n self._lines_task.update(lines)\n self._dots_task.update(dots)\n\n def __iter__(self):\n yield from self._lines_task\n yield from self._dots_task" } ]
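The context above pairs per-frame drawing tasks with a matplotlib backend. A minimal usage sketch follows; the module paths mirror the record's "path" fields, and the plotter's public update entry point is assumed from the `_update_impl` dispatch shown above, so treat every name outside the record as an assumption rather than the library's confirmed API.

# Minimal usage sketch for the plotter/task API in this record (illustrative only).
# Module paths follow the "path" fields above; plotter.update() is assumed from the
# _create_impl/_update_impl dispatch and may differ in the real BasePlotter class.
from poselib.poselib.skeleton.skeleton3d import SkeletonMotion
from poselib.poselib.visualization.plt_plotter import Matplotlib3DPlotter
from poselib.poselib.visualization.skeleton_plotter_tasks import Draw3DSkeletonMotion

motion = SkeletonMotion.from_file("motion.npy")  # hypothetical input file
task = Draw3DSkeletonMotion("demo", motion, frame_index=0)
plotter = Matplotlib3DPlotter(task)  # creates artists via the _create_impl callables

for frame in range(1, motion.tensor.shape[0]):
    task.update(frame_index=frame)  # refresh the lines/dots/trail data for this frame
    plotter.update(task)            # assumed redraw hook -> _update_impl + _draw()
plotter.show()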
from ...core import *
from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from ...visualization.common import (
    plot_skeleton_state,
    plot_skeleton_motion_interactive,
)
from ...visualization.plt_plotter import Matplotlib3DPlotter
from ...visualization.skeleton_plotter_tasks import (
    Draw3DSkeletonMotion,
    Draw3DSkeletonState,
)
import numpy as np
import torch
18,285
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.


def test_skel_tree():
    skel_tree = SkeletonTree.from_mjcf(
        "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
        backend="pytorch",
    )
    skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch")
    # assert skel_tree.to_str() == skel_tree_rec.to_str()
    print(skel_tree.node_names)
    print(skel_tree.local_translation)
    print(skel_tree.parent_indices)
    skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
    skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"])
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)


def test_skel_motion():
    skel_motion = SkeletonMotion.from_file(
        "/tmp/tmp.npy", backend="pytorch", load_context=True
    )
    plot_skeleton_motion_interactive(skel_motion)


def test_grad():
    source_motion = SkeletonMotion.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy",
        backend="pytorch",
        device="cuda:0",
    )
    source_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_skeleton_tree = target_tpose.skeleton_tree

    joint_mapping = {
        "upArm_r": "right_shoulder",
        "upArm_l": "left_shoulder",
        "loArm_r": "right_elbow",
        "loArm_l": "left_elbow",
        "upLeg_r": "right_hip",
        "upLeg_l": "left_hip",
        "loLeg_r": "right_knee",
        "loLeg_l": "left_knee",
        "foot_r": "right_ankle",
        "foot_l": "left_ankle",
        "hips": "pelvis",
        "neckA": "neck",
        "spineA": "abdomen",
    }

    rotation_to_target_skeleton = quat_from_angle_axis(
        angle=torch.tensor(90.0).float(),
        axis=torch.tensor([1, 0, 0]).float(),
        degree=True,
    )

    target_motion = source_motion.retarget_to(
        joint_mapping=joint_mapping,
        source_tpose_local_rotation=source_tpose.local_rotation,
        source_tpose_root_translation=source_tpose.root_translation,
        target_skeleton_tree=target_skeleton_tree,
        target_tpose_local_rotation=target_tpose.local_rotation,
        target_tpose_root_translation=target_tpose.root_translation,
        rotation_to_target_skeleton=rotation_to_target_skeleton,
        scale_to_target_skeleton=0.01,
    )

    target_state = SkeletonState(
        target_motion.tensor[800, :],
        target_motion.skeleton_tree,
        target_motion.is_local,
    )

    skeleton_tree = target_state.skeleton_tree
    root_translation = target_state.root_translation
    global_translation = target_state.global_translation

    q = np.zeros((len(skeleton_tree), 4), dtype=np.float32)
    q[..., 3] = 1.0
    q = torch.from_numpy(q)
    max_its = 10000

    task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.


def test_skel_tree():
    skel_tree = SkeletonTree.from_mjcf(
        "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml",
        backend="pytorch",
    )
    skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch")
    # assert skel_tree.to_str() == skel_tree_rec.to_str()
    print(skel_tree.node_names)
    print(skel_tree.local_translation)
    print(skel_tree.parent_indices)
    skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree)
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)
    skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"])
    plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state)


def test_skel_motion():
    skel_motion = SkeletonMotion.from_file(
        "/tmp/tmp.npy", backend="pytorch", load_context=True
    )
    plot_skeleton_motion_interactive(skel_motion)


def test_grad():
    source_motion = SkeletonMotion.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy",
        backend="pytorch",
        device="cuda:0",
    )
    source_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_tpose = SkeletonState.from_file(
        "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy",
        backend="pytorch",
        device="cuda:0",
    )
    target_skeleton_tree = target_tpose.skeleton_tree

    joint_mapping = {
        "upArm_r": "right_shoulder",
        "upArm_l": "left_shoulder",
        "loArm_r": "right_elbow",
        "loArm_l": "left_elbow",
        "upLeg_r": "right_hip",
        "upLeg_l": "left_hip",
        "loLeg_r": "right_knee",
        "loLeg_l": "left_knee",
        "foot_r": "right_ankle",
        "foot_l": "left_ankle",
        "hips": "pelvis",
        "neckA": "neck",
        "spineA": "abdomen",
    }

    rotation_to_target_skeleton = quat_from_angle_axis(
        angle=torch.tensor(90.0).float(),
        axis=torch.tensor([1, 0, 0]).float(),
        degree=True,
    )

    target_motion = source_motion.retarget_to(
        joint_mapping=joint_mapping,
        source_tpose_local_rotation=source_tpose.local_rotation,
        source_tpose_root_translation=source_tpose.root_translation,
        target_skeleton_tree=target_skeleton_tree,
        target_tpose_local_rotation=target_tpose.local_rotation,
        target_tpose_root_translation=target_tpose.root_translation,
        rotation_to_target_skeleton=rotation_to_target_skeleton,
        scale_to_target_skeleton=0.01,
    )

    target_state = SkeletonState(
        target_motion.tensor[800, :],
        target_motion.skeleton_tree,
        target_motion.is_local,
    )

    skeleton_tree = target_state.skeleton_tree
    root_translation = target_state.root_translation
    global_translation = target_state.global_translation

    q = np.zeros((len(skeleton_tree), 4), dtype=np.float32)
    q[..., 3] = 1.0
    q = torch.from_numpy(q)
    max_its = 10000

    task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
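test_grad above stops right after allocating identity quaternions `q` and `max_its`, and the record's `next_line` only builds the plot task. For orientation, one plausible continuation is a simple gradient-descent IK loop, sketched below; `SkeletonState.from_rotation_and_root_translation` is assumed from poselib's API and is not part of this record.

# Illustrative continuation (not from the record): optimize the local quaternions q
# so forward kinematics reproduces the retargeted pose's global joint positions.
q = q.clone().requires_grad_(True)
optimizer = torch.optim.Adam([q], lr=1e-2)

for it in range(max_its):
    state = SkeletonState.from_rotation_and_root_translation(  # assumed poselib API
        skeleton_tree, q, root_translation, is_local=True
    )
    loss = ((state.global_translation - global_translation) ** 2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    with torch.no_grad():  # quaternions drift off the unit sphere; renormalize
        q /= q.norm(dim=-1, keepdim=True)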
plotter = Matplotlib3DPlotter(task)
5
2023-10-15 19:05:47+00:00
24k
e4s2023/E4S2023
our_swap_face_video_pipeline2.py
[ { "identifier": "Net3", "path": "models/networks.py", "snippet": "class Net3(nn.Module):\n \"\"\" FSEncoder + styleGAN2 \"\"\"\n\n def __init__(self,opts,):\n super(Net3, self).__init__()\n self.opts=opts\n assert self.opts.fsencoder_type in [\"psp\",\"sean\"]\n if self.opts.fsencoder_type==\"psp\":\n self.encoder = FSEncoder_PSP(mode='ir_se', opts=self.opts)\n dim_s_code = 256 + 512 + 512\n else:\n self.encoder = FSEncoder_SEAN(input_nc=3, output_nc=512,in_size = 256)\n dim_s_code = 512\n \n self.split_layer_idx = 5\n self.remaining_layer_idx = self.opts.remaining_layer_idx\n \n # 区分component 的 W+ space 的 MLPs\n self.MLPs = nn.ModuleList()\n for i in range(self.opts.num_seg_cls):\n self.MLPs.append(\n LocalMLP(\n dim_component=dim_s_code,\n dim_style=512,\n num_w_layers= self.remaining_layer_idx if self.remaining_layer_idx != 17 else 18\n )\n )\n \n self.G = Generator(size=self.opts.out_size, style_dim=512, n_mlp=8, split_layer_idx = self.split_layer_idx, remaining_layer_idx = self.remaining_layer_idx)\n\n # styleGAN的参数是否更新\n if not self.opts.train_G:\n for param in self.G.parameters():\n param.requires_grad = False\n # 注意,styleGAN的8层FC是永远不更新的\n else:\n for param in self.G.style.parameters():\n param.requires_grad = False\n \n # styleGAN的倒数几层不更新 (包括convs 和 ToRGBs)\n if self.remaining_layer_idx != 17:\n for param in self.G.convs[-(17-self.remaining_layer_idx):].parameters():\n param.requires_grad = False\n for param in self.G.to_rgbs[-(17-self.remaining_layer_idx)//2 - 1:].parameters():\n param.requires_grad = False\n \n \n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n \"\"\"输入一张RGB图和对应的mask,\n (1) encoder 得到对应的F/S空间的特征,\n (2) 再送到styleGAN得到一张输出的图片\n\n Args:\n img (Tensor): 一对RGB图, each with shape [bs,3,1024,1024]\n mask ([type]): 一对RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n resize (bool, optional): G生成的图片是否 resize. Defaults to True.\n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. 
Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 13, 512]\n \n \n # # the remaining layers are not split by component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # for a uniform interface, expand the style codes of the last 3 layers with a #seg_cls dimension as well\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n # 1. use only the style codes, i.e., G(w)\n images1, result_latent, structure_feats_GT = self.G([codes], structure_feats, mask, input_is_latent=True,\n randomize_noise=randomize_noise,return_latents=return_latents,\n use_structure_code=False)\n \n \n # # 2. use both the style codes and the structure code, i.e., G(w,F)\n # images2, _ , _ = self.G([codes], structure_feats, mask, input_is_latent=True,\n # randomize_noise=randomize_noise,return_latents=return_latents,\n # use_structure_code=True)\n \n if return_latents:\n return images1, structure_feats_GT, result_latent\n else:\n return images1, structure_feats_GT\n\n def get_style(self, img, mask):\n \"\"\"Given an RGB image and the corresponding mask, obtain the style codes of each component\n \n Args:\n img (Tensor): RGB image, each with shape [bs,3,1024,1024]\n mask (Tensor): the mask corresponding to the RGB image, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n structure_feats(Tensor): the structure code of the image, with shape [bs,512,32,32]; note that this is actually a residual w.r.t. the output of the corresponding StyleGAN layer\n all_codes(Tensor): the style codes of each component, with shape [bs,#comp,18,512].\n !!! 
Note: the per-component entries of the first 7 layers are actually meaningless; they only exist so the shapes stay consistent for a uniform interface; just use the 1st one !!!\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # the remaining layers are not split by component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # for a uniform interface, expand the style codes of the last 3 layers with a #seg_cls dimension as well\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return structure_feats, style_codes\n\n def get_style_vectors(self, img, mask):\n \"\"\"Given an RGB image and the corresponding mask, obtain the style vectors of each component\n \n Args:\n img (Tensor): RGB image, each with shape [bs,3,1024,1024]\n mask (Tensor): the mask corresponding to the RGB image, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n style_vectors(Tensor): with shape [bs,#seg_cls,512]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n \n return style_vectors, structure_feats\n \n def cal_style_codes(self,style_vectors):\n \"\"\"Convert each component's style vector into StyleGAN style codes\"\"\"\n \n codes=[]\n bs, num_comp = style_vectors.size(0), style_vectors.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](style_vectors[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # the remaining layers are not split by component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](style_vectors.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # for a uniform interface, expand the style codes of the last 3 layers with a #seg_cls dimension as well\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = 
self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return style_codes\n\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):\n \"\"\"Given a mask, the style codes of each component, and the structure code of this image, generate an image\n \n Args:\n style_codes (Tensor): the style codes of each component, with shape [bs,#comp,18,512]\n struc_codes (Tensor)\n mask (Tensor): the mask, with shape [bs,#seg_cls,1024,1024]\n \n randomize_noise (bool, optional): whether to add random noise. Defaults to True.\n return_latents (bool, optional): whether to return the style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n \n images, result_latent, structure_feats = self.G([style_codes], struc_codes, mask, input_is_latent=True,\n randomize_noise=randomize_noise,noise=noise,return_latents=return_latents,\n use_structure_code=False)\n\n if return_latents:\n return images, result_latent, structure_feats\n else:\n return images,-1, structure_feats" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" }, { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "VideoSwapPTICoach", "path": "training/video_swap_ft_coach.py", "snippet": "class VideoSwapPTICoach:\n def __init__(self, opts, e4s_net=None, num_targets=50, erode=False,\n ):\n self.opts = opts\n\n self.erode = erode\n self.device = torch.device(\"cuda\", 0)\n # self.opts.device = self.device\n \n # define the dataset\n self.dataset = self.configure_dataset(num_targets)\n if num_targets == -1:\n num_targets = len(self.dataset)\n self.num_targets = num_targets\n \n # define the loss functions\n self.mse_loss = nn.MSELoss().to(self.device).eval()\n if self.opts.lpips_lambda > 0:\n self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()\n if self.opts.id_lambda > 0:\n self.id_loss = IDLoss(self.opts).to(self.device).eval()\n if self.opts.face_parsing_lambda > 0:\n self.face_parsing_loss = FaceParsingLoss(self.opts).to(self.device).eval()\n \n # initialize the network\n if e4s_net is None:\n self.net = Net3(self.opts)\n # print(self.device)\n self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net)\n self.net = self.net.to(self.device)\n 
else:\n self.net = e4s_net\n \n # load the pre-trained parameters of the whole model as initialization\n assert self.opts.checkpoint_path is not None, \"A pre-trained checkpoint must be provided!\"\n ckpt_dict = torch.load(self.opts.checkpoint_path)\n self.net.latent_avg = ckpt_dict['latent_avg'].to(self.device)\n self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict[\"state_dict\"],prefix=\"module.\"))\n print(\"Loaded pre-trained model successfully!\") \n \n # initialize the optimizer\n self.optimizer = self.configure_optimizer()\n\n # # directory for saving the fine-tuned model\n # self.checkpoint_dir = os.path.join(self.opts.exp_dir, 'checkpoints')\n # os.makedirs(self.checkpoint_dir, exist_ok=True)\n \n # Initialize tensorboard logger\n self.log_dir = os.path.join(self.opts.exp_dir, 'logs_lr%f_iters%d_erode%d_run2'%(self.opts.pti_learning_rate, self.opts.max_pti_steps, self.opts.erode_radius))\n os.makedirs(self.log_dir, exist_ok=True)\n self.logger = SummaryWriter(logdir = self.log_dir)\n \n def configure_dataset(self, num_targets: int = -1):\n save_dir = self.opts.exp_dir\n ds = VideoFaceSwappingDataset(\n driven = sorted(glob.glob(os.path.join(save_dir,\"imgs\", \"D_*.png\")))[:num_targets],\n driven_recolor = sorted(glob.glob(os.path.join(save_dir, \"imgs\", \"D_recolor_*.png\")))[:num_targets],\n driven_mask = sorted(glob.glob(os.path.join(save_dir,\"mask\",\"D_mask_*.png\")))[:num_targets],\n driven_style_vector = sorted(glob.glob(os.path.join(save_dir,\"styleVec\",\"D_style_vec_*.pt\")))[:num_targets],\n target = sorted(glob.glob(os.path.join(save_dir,\"imgs\", \"T_*.png\")))[:num_targets],\n target_mask = sorted(glob.glob(os.path.join(save_dir,\"mask\",\"T_mask_*.png\")))[:num_targets],\n target_style_vector = sorted(glob.glob(os.path.join(save_dir,\"styleVec\",\"T_style_vec_*.pt\")))[:num_targets],\n img_transform=transforms.Compose([TO_TENSOR, NORMALIZE]),\n label_transform=transforms.Compose([TO_TENSOR])\n ) \n \n return ds\n \n def configure_optimizer(self):\n self.params = list(filter(lambda p: p.requires_grad ,list(self.net.parameters())))\n if self.opts.optim_name == 'adam':\n optimizer = torch.optim.Adam(self.params, lr=self.opts.pti_learning_rate)\n else:\n optimizer = Ranger(self.params, lr=self.opts.pti_learning_rate)\n return optimizer\n \n def calc_loss(self, img, img_recon, foreground_mask=None):\n \"\"\"\n img: the target image\n img_recon: the current reconstruction result \n \"\"\"\n loss_dict = {}\n loss = 0.0\n id_logs = None\n \n if foreground_mask is not None:\n img_recon = img_recon * foreground_mask\n img = img * foreground_mask \n \n if self.opts.id_lambda > 0:\n loss_id, sim_improvement, id_logs = self.id_loss(img_recon, img)\n loss_dict['loss_id'] = float(loss_id)\n loss_dict['id_improve'] = float(sim_improvement)\n loss += loss_id * self.opts.id_lambda\n if self.opts.l2_lambda > 0:\n loss_l2 = F.mse_loss(img_recon, img)\n loss_dict['loss_l2'] = float(loss_l2)\n loss += loss_l2 * self.opts.l2_lambda\n if self.opts.lpips_lambda > 0:\n loss_lpips = 0\n for i in range(3):\n loss_lpips_1 = self.lpips_loss(\n F.adaptive_avg_pool2d(img_recon,(1024//2**i,1024//2**i)), \n F.adaptive_avg_pool2d(img,(1024//2**i,1024//2**i))\n )\n loss_lpips += loss_lpips_1\n \n loss_dict['loss_lpips'] = float(loss_lpips)\n loss += loss_lpips * self.opts.lpips_lambda\n if self.opts.face_parsing_lambda > 0:\n loss_face_parsing, face_parsing_sim_improvement = self.face_parsing_loss(img_recon, img)\n loss_dict['loss_face_parsing'] = float(loss_face_parsing)\n loss_dict['face_parsing_improve'] = float(face_parsing_sim_improvement)\n loss += loss_face_parsing * self.opts.face_parsing_lambda\n \n loss_dict['loss'] = float(loss)\n 
return loss, loss_dict, id_logs\n \n @torch.no_grad()\n def recon_driven(self):\n self.net.eval()\n\n print('Reconstructing driven videos...')\n for idx, (driven_image, driven_m, driven_s_v,\n target_image, target_m, target_s_v,\n driven_recolor_pil, driven_pil, target_pil) in tqdm(enumerate(self.dataset)): # starting from idx = 0\n \n driven_m = (driven_m*255).long().to(self.opts.device).unsqueeze(0)\n driven_onehot = torch_utils.labelMap2OneHot(driven_m, num_cls=self.opts.num_seg_cls)\n driven_style_vector = driven_s_v.to(self.opts.device).float()\n driven_style_code = self.net.cal_style_codes(driven_style_vector)\n \n recon_i, _, structure_feats_i = self.net.gen_img(torch.zeros(1,512,32,32).to(self.opts.device), driven_style_code, driven_onehot)\n # randomize_noise=False,noise=noise)\n torch_utils.tensor2im(recon_i[0]).save(os.path.join(self.opts.exp_dir, \"imgs\", \"D_finetuned_recon_%04d.png\"%idx))\n\n def train(self):\n self.train_e4s()\n\n def train_e4s(self):\n self.net.train()\n \n print('Fine-tuning the network...')\n for step in trange(self.opts.max_pti_steps):\n step_loss_dict = defaultdict(list)\n t = (step + 1) / self.opts.max_pti_steps\n\n verbose_recon = None\n for idx, (driven_image, driven_m, driven_s_v,\n target_image, target_m, target_s_v,\n driven_recolor_pil, driven_pil, target_pil) in enumerate(tqdm(self.dataset,\n desc=f\"tuning e4s_g {step}/{self.opts.max_pti_steps}\",\n position=0,\n )): # starting from idx = 0\n driven_m = (driven_m*255).long().to(self.opts.device).unsqueeze(0)\n\n if self.erode:\n driven_pil = Image.fromarray(np.transpose((255*(driven_image.numpy()+1)/2).astype(np.uint8), (1,2,0)))\n driven_m_np = driven_m[0,0,:,:].cpu().numpy().astype(np.uint8)\n driven_m_eroded, erode_verbose = erode_mask(driven_m_np, driven_pil , radius=self.opts.erode_radius, verbose=True)\n driven_m = torch.from_numpy(driven_m_eroded).long().to(self.opts.device).unsqueeze(0).unsqueeze(0)\n\n driven_image = driven_image.to(self.opts.device).float().unsqueeze(0)\n driven_onehot = torch_utils.labelMap2OneHot(driven_m, num_cls=self.opts.num_seg_cls)\n driven_style_vector = driven_s_v.to(self.opts.device).float()\n driven_style_code = self.net.cal_style_codes(driven_style_vector)\n\n zero_latent = torch.zeros((1,512,32,32), requires_grad=False).to(self.opts.device)\n recon_i, _, structure_feats_i = self.net.gen_img(zero_latent, driven_style_code, driven_onehot)\n # in [-1,1]\n\n ''' also guided by recolor net '''\n recolor_i = torch_utils.im2tensor(driven_recolor_pil, std=False)\n\n mask_bg_and_hair = logical_or_reduce(*[driven_m == clz for clz in [0, 4, 11]])\n is_foreground = torch.logical_not(mask_bg_and_hair)\n foreground_mask = is_foreground.float()\n foreground_mask = F.interpolate(foreground_mask, (1024, 1024), mode='bilinear', align_corners=False)\n if self.erode:\n loss, loss_dict, id_logs = self.calc_loss(driven_image, recon_i, foreground_mask=foreground_mask)\n else:\n loss, loss_dict, id_logs = self.calc_loss(driven_image, recon_i)\n\n loss_recolor, _, _ = self.calc_loss(recolor_i, recon_i, foreground_mask=foreground_mask)\n loss += loss_recolor * self.opts.recolor_lambda # default: 0.5?\n \n if idx == 0:\n verbose_recon = np.array(torch_utils.tensor2im(recon_i[0]))\n\n step_loss_dict['loss'].append(loss.item())\n for k,v in loss_dict.items():\n if \"loss_\" in k:\n step_loss_dict[k].append(v)\n \n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n # log the result of the first frame of the video sequence after each step\n self.logger.add_image(\"image_recon\", verbose_recon, step, dataformats='HWC')\n\n # log the 
average loss over all frames of the video at each step\n log_dict = {}\n for key, losses in step_loss_dict.items():\n loss_mean = sum(losses) / len(losses)\n loss_max = max(losses)\n \n self.logger.add_scalar(f'loss_mean/{key}', loss_mean, step)\n self.logger.add_scalar(f'loss_max/{key}', loss_max, step)\n \n if step+1 == 100:\n save_dict = self.get_save_dict()\n torch.save(save_dict, os.path.join(self.opts.exp_dir, \"finetuned_G_lr%f_iters%d.pth\"%(self.opts.pti_learning_rate, step+1)))\n \n print('Finished fine-tuning e4s generator!')\n\n def checkpoint_me(self):\n save_name = 'finetuned_model_%d.pt'%self.opts.max_pti_steps\n save_dict = self.get_save_dict()\n checkpoint_path = os.path.join(self.checkpoint_dir, save_name)\n torch.save(save_dict, checkpoint_path)\n \n def get_save_dict(self):\n save_dict = {\n 'state_dict': self.net.state_dict(),\n 'opts': vars(self.opts),\n }\n # save the latent avg in state_dict for inference if truncation of w was used during training\n if self.opts.start_from_latent_avg:\n save_dict['latent_avg'] = self.net.latent_avg\n \n return save_dict\n\n def freeze_e4s_g(self):\n self.net.requires_grad_(False)" }, { "identifier": "OurSwapFacePipelineOptions", "path": "options/our_swap_face_pipeline_options.py", "snippet": "class OurSwapFacePipelineOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=str, default=\"./tmp_exp\", help='Path to experiment output directory')\n\t\tself.parser.add_argument('--num_seg_cls', type=int, default=12,help='Segmentation mask class number')\n\t\tself.parser.add_argument('--source_frame_name', type=str, default=\"28494\", help='source frame number')\n\t\tself.parser.add_argument('--target_video_name', type=str, default=\"874\",help='target video name')\n # ================= Model settings =====================\n\t\tself.parser.add_argument('--out_size', type=int, default=1024, help='output image size') \n\t\tself.parser.add_argument('--fsencoder_type', type=str, default=\"psp\", help='FS encoder type') \n\t\tself.parser.add_argument('--remaining_layer_idx', type=int, default=13, help='index from which the remaining layers do not use the mask')\n\t\tself.parser.add_argument('--outer_dilation', type=int, default=15, help='dilation width')\n\t\tself.parser.add_argument('--erode_radius', type=int, default=3, help='erosion width')\n \n # ================= Dataset =====================\n\t\tself.parser.add_argument('--batch_size', default=1, type=int, help='Batch size for training')\n\t\tself.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers')\n\t\tself.parser.add_argument('--target_images_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/01_2_face', type=str)\n\t\tself.parser.add_argument('--driven_images_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/29698_to_01/driven', type=str)\n\t\tself.parser.add_argument('--UI_edit_masks_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/29698_to_01/edit_mask', type=str)\n\t\tself.parser.add_argument('--swapped_style_vectors_dir', default='/apdcephfs/share_1290939/zhianliu/py_projects/our_editing_swappingFace_video/29698_to_01/FFHQ_model_video_swap_styleVec', type=str)\n\n # ================= Training =====================\n\t\tself.parser.add_argument('--train_G', default=True, type=bool, help='Whether to train the model')\n\t\tself.parser.add_argument('--pti_learning_rate', default=1e-3, type=float, 
help='PTI learning rate')\n\t\tself.parser.add_argument('--stiching_learning_rate', default=1e-2, type=float, help='Stitching learning rate')\n\t\tself.parser.add_argument('--optim_name', default='adam', type=str, help='Which optimizer to use') \n\t\tself.parser.add_argument('--max_pti_steps', default=0, type=int, help='PTI finetune steps')\n\t\tself.parser.add_argument('--max_stiching_steps', default=100, type=int, help='Stitching finetune steps') \n\t\tself.parser.add_argument('--device', default='cuda:0', type=str, help='Which GPU(s) to use')\n \n # ================= Losses =====================\n\t\tself.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')\n\t\tself.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor')\n\t\tself.parser.add_argument('--id_loss_multiscale', default=True, type=bool, help='Whether to apply multi scale in ID loss') \n\t\tself.parser.add_argument('--face_parsing_lambda', default=0.1, type=float, help='Face parsing loss multiplier factor')\n\t\tself.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor')\n\t\tself.parser.add_argument('--recolor_lambda', default=5.0, type=float, help='Recolor reg loss multiplier factor')\n \n # ================== Pre-trained models ==================\n\t\tself.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in w space instead of w+')\n # whether to start learning from the StyleGAN average latent\n\t\tself.parser.add_argument('--start_from_latent_avg', action='store_true',default=True, help='Whether to add average latent vector to generate codes from encoder.')\n # StyleGAN output image size\n\t\tself.parser.add_argument('--output_size', default=1024, type=int, help='Output size of generator')\n\t\tself.parser.add_argument('--n_styles', default=18, type=int, help='number of StyleGAN layers')\n \n # ir_se50 pre-trained weights, for id_loss\n\t\t# self.parser.add_argument('--ir_se50_path', default='/apdcephfs/share_1290939/zhianliu/pretrained_models/pixel2style2pixel/model_ir_se50.pth', type=str, help='Path to ir_se50 model weights')\n\t\tself.parser.add_argument('--ir_se50_path',\n\t\t\t\t\t\t\t\t default='./pretrained/pixel2style2pixel/model_ir_se50.pth',\n\t\t\t\t\t\t\t\t type=str, help='Path to ir_se50 model weights')\n\t\t# self.parser.add_argument('--face_parsing_model_path', default='/apdcephfs/share_1290939/zhianliu/pretrained_models/CelebA-Mask-HQ-faceParser/model.pth', type=str, help='Path to face parsing model weights')\n\t\tself.parser.add_argument('--face_parsing_model_path',\n\t\t\t\t\t\t\t\t default='./pretrained/faceseg/model.pth',\n\t\t\t\t\t\t\t\t type=str, help='Path to face parsing model weights')\n\t\t# self.parser.add_argument('--checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/v_15_hybrid_stage1_seg12_finetuneGD_8A100_pspHyperParas_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000_belowPyTorch1_6.pt', type=str, help='Path to model checkpoint')\n\t\t# self.parser.add_argument('--checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/v_15_hybrid_stage1_seg12_finetuneGD_8A100_pspHyperParas_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt', type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--checkpoint_path', default='./pretrained/E4S/iteration_300000.pt', type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--PTI_checkpoint_path', 
default='/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v_18_video_swapping/musk_to_874/finetuned_G_lr1e3_iters150_erode.pth', type=str, help='Path to PTI finetuned model checkpoint')\n\t\t# self.parser.add_argument('--checkpoint_path', default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/v_15_hybrid_stage1_seg12_finetuneGD_8A100_pspHyperParas_remainLyrIdx13_flip_200KIters/checkpoints/iteration_120000.pt', type=str, help='Path to model checkpoint')\n\n\t\t\n\tdef parse(self):\n\t\topts = self.parser.parse_args()\n\t\treturn opts" }, { "identifier": "swap_head_mask_revisit", "path": "swap_face_fine/swap_face_mask.py", "snippet": "def swap_head_mask_revisit(source, target):\n res = np.zeros_like(target)\n \n # first find the target's hair, below-face, and ear regions\n target_hair = np.equal(target , 4)\n target_belowface = np.equal(target , 8)\n target_eras = np.equal(target, 7)\n target_era_rings = np.equal(target, 11)\n \n target_bg = np.equal(target, 0)\n target_mid_region = np.logical_or(\n np.logical_or(\n np.logical_or(target_hair, target_belowface),\n target_eras\n ),\n target_era_rings\n )\n \n # find the source's face region, i.e., everything except the background, hair and below-face regions\n source_hair_bg_neck_region = np.logical_or(\n np.logical_or(np.equal(source, 0), np.equal(source, 4)),\n np.equal(source, 8)\n )\n source_ear_earings_region = np.logical_or(np.equal(source, 7), np.equal(source, 11))\n source_non_face_region = np.logical_or(source_hair_bg_neck_region, source_ear_earings_region)\n source_face_region = np.logical_not(source_non_face_region)\n # source_face_region = np.logical_not(source_hair_bg_neck_region)\n \n # first paste the target's background, below-face, ears and earrings\n res[target_bg] = 99 # a magic number: reserve the background region by marking it with 99\n res[target_belowface] = 8\n # # # then paste the target's hair\n # res[target_hair] = 4 ## doing this before or after pasting the source affects whether the target's bangs are kept\n res[target_eras] = 7\n res[target_era_rings] = 11\n \n # then paste the source's face\n res[source_face_region] = source[source_face_region]\n \n # then paste the target's hair\n res[target_hair] = 4\n \n # fill any remaining zeros with skin\n if np.sum(res==0) != 0:\n hole_map = 255*(res==0)\n res[res==0] = 6\n else:\n hole_map = np.zeros_like(res)\n \n # finally restore the background label\n res[res==99] = 0\n\n try:\n eye_line = np.where(res == 2)[0].min()\n except:\n eye_line = np.where(res == 3)[0].min()\n \n return res, hole_map, eye_line" }, { "identifier": "swap_head_mask_hole_first", "path": "swap_face_fine/swap_face_mask.py", "snippet": "def swap_head_mask_hole_first(source, target):\n \"\"\" segmentation format:\n 0 - background\n 1 - lip\n 2 - eyebrow\n 3 - eyes\n 4 - hair\n 5 - nose\n 6 - skin\n 7 - ear\n 8 - neck\n 9 - tooth\n 10 - eyeglass\n 11 - earring\n \"\"\"\n # calculate the hole map first\n source_bg_mask = np.logical_or(source == 4, source == 0) # hair, bg\n source_bg_mask = np.logical_or(source_bg_mask, source == 8) # neck\n source_bg_mask = np.logical_or(source_bg_mask, source == 7) # ear\n source_bg_mask = np.logical_or(source_bg_mask, source == 11) # earring\n source_face_mask = np.logical_not(source_bg_mask)\n\n target_bg_mask = np.logical_or(target == 4, target == 0) # hair, bg\n target_bg_mask = np.logical_or(target_bg_mask, target == 8) # neck\n target_bg_mask = np.logical_or(target_bg_mask, target == 7) # ear\n target_bg_mask = np.logical_or(target_bg_mask, target == 11) # earring\n target_face_mask = np.logical_not(target_bg_mask)\n\n face_overlap_mask = np.logical_and(source_face_mask, target_face_mask)\n hole_mask = np.logical_xor(face_overlap_mask, target_face_mask)\n\n # swap mask\n res = np.zeros_like(target)\n 
\n target_regions = [np.equal(target, i) for i in range(12)]\n source_regions = [np.equal(source, i) for i in range(12)]\n\n # adjust or finetune the hole mask\n eye_line = int(2 / 5 * target.shape[0])\n nose_line = int(3 / 5 * target.shape[0])\n if np.any(source == 3):\n eye_line = np.where(source == 3)[0].max() # eye lowest\n elif np.any(source == 2):\n eye_line = np.where(source == 2)[0].max() # eye_brow lowest\n if np.any(source == 5):\n nose_line = np.where(source == 5)[0].max() # nose lowest\n # hole_mask[np.logical_and(source_regions[4], target_regions[6])] = False # source hair & target skin, not\n # hole_mask[np.logical_and(source_regions[4], target_regions[2])] = False # source hair & target eyebrow, not\n # hole_mask[np.logical_and(source_regions[4], target_regions[3])] = False # source hair & target eye, not\n if len(hole_mask) >= eye_line:\n hole_mask[:eye_line, :] = False # higher than eyes set as False\n\n \"\"\" The background, neck, ear and earrings regions of target \"\"\"\n res[target_regions[0]] = 99 # a place-holder magic number for bg (target-bg)\n res[target_regions[8]] = 8 # neck (target-bg)\n # res[target_regions[4]] = 4 # hair, hair first as background\n res[target_regions[7]] = 7 # ear (target-bg)\n res[target_regions[11]] = 11 # earring (target-bg)\n\n # fill in the hole\n\n # res = fill_hole(res, hole_mask, radius=5, eye_line=eye_line, nose_line=nose_line)\n # res[hole_mask] = 4\n # hole_mask[:eye_line, :] = False # higher than eyes set as False\n\n \"\"\" The inner-face of source \"\"\"\n ''' op1. cairong version '''\n # res[source_regions[7]] = 7\n # res[source_regions[11]] = 11\n res[source_regions[1]] = 1 # lip\n res[source_regions[2]] = 2 # eyebrows\n res[np.logical_and(source_regions[4], target_regions[2])] = 2 # source hair & target eyebrows\n res[source_regions[3]] = 3 # eyes\n res[source_regions[5]] = 5 # nose\n res[source_regions[6]] = 6 # skin\n res[source_regions[9]] = 9 # mouth\n ''' op2. 
zhian version '''\n # res[np.logical_and(source_regions[1], np.not_equal(res, 99))] = 1 # lip\n # res[np.logical_and(source_regions[2], np.not_equal(res, 99))] = 2 # eyebrows\n # res[np.logical_and(source_regions[3], np.not_equal(res, 99))] = 3 # eyes\n # res[np.logical_and(source_regions[5], np.not_equal(res, 99))] = 5 # nose\n # res[np.logical_and(source_regions[6], np.not_equal(res, 99))] = 6 # skin\n # res[np.logical_and(source_regions[9], np.not_equal(res, 99))] = 9 # mouth\n\n \"\"\" Fix target foreground like hat occlusions \"\"\"\n # Additional foreground = (target_bg) && (source_skin higher than target_skin)\n H, W = target.shape\n target_skin_highest_by_width = np.ones(W, dtype=np.long) * H\n target_skin = np.zeros_like(target, dtype=target.dtype)\n target_skin[target_regions[6]] = 1\n target_skin = target_skin * (np.arange(H)[:, None])\n target_skin[target_skin == 0] = H\n target_skin_highest_by_width = target_skin.min(axis=0) # (W,)\n target_bg_region = np.where(target == 0)\n target_bg_positions_h = target_bg_region[0]\n target_bg_positions_w = target_bg_region[1]\n target_foreground_h_positions = []\n target_foreground_w_positions = []\n for i in range(len(target_bg_positions_h)):\n h = target_bg_positions_h[i]\n w = target_bg_positions_w[i]\n if h <= target_skin_highest_by_width[w] != H:\n target_foreground_h_positions.append(h)\n target_foreground_w_positions.append(w)\n target_foreground_region = (np.array(target_foreground_h_positions),\n np.array(target_foreground_w_positions))\n if len(target_foreground_h_positions) > 0:\n res[target_foreground_region] = 98 # additional foreground (target-foreground)\n\n # res[np.logical_and(source_regions[6], np.not_equal(res, 99))] = 6 # skin\n res[target_regions[4]] = 4 # not hair first (target-foreground), hair as foreground\n res[target_regions[10]] = 10 # eye_glass (target-foreground)\n # res[target_regions[7]] = 7 # removed, ear is background (target-background)\n\n \"\"\" The missing pixels, fill in skin temporarily \"\"\"\n ''' op1. cairong version '''\n res[res == 0] = 6 # fill hole with skin\n res[res == 99] = 0\n res[res == 98] = 0\n hole_map = res.copy()\n hole_map[hole_mask] = 17 # see: torch_utils.get_colors\n ''' op2. zhian version '''\n # if np.sum(res == 0) != 0:\n # hole_mask = 1 * (res == 0)\n # res[res == 0] = 6 # skin\n # else:\n # hole_mask = np.zeros_like(res)\n # hole_mask = hole_mask.astype(np.bool)\n # # hole_mask[0:eye_line] = False # set parts higher than eyes to zero(False)\n # hole_mask[source_regions[4]] = False # set source hair parts to zero(False)\n # res[res == 99] = 0 # restore the background\n # hole_map = res.copy()\n # hole_map[hole_mask] = 1\n\n \"\"\"\n res: 0-bg, 1-lip, 2-eyebrow, 3-eye, 4-hair, 5-nose, 6-skin, 7-ear, 8-neck\n hole_mask: in {True,False}\n hole_map: in {0,...,11}\n \"\"\"\n return res, hole_mask, hole_map, nose_line" }, { "identifier": "dilation", "path": "utils/morphology.py", "snippet": "def dilation(\n tensor: torch.Tensor,\n kernel: torch.Tensor,\n structuring_element: Optional[torch.Tensor] = None,\n origin: Optional[List[int]] = None,\n border_type: str = 'geodesic',\n border_value: float = 0.0,\n max_val: float = 1e4,\n engine: str = 'unfold',\n) -> torch.Tensor:\n r\"\"\"Return the dilated image applying the same kernel in each channel.\n .. image:: _static/img/dilation.png\n The kernel must have 2 dimensions.\n Args:\n tensor: Image with shape :math:`(B, C, H, W)`.\n kernel: Positions of non-infinite elements of a flat structuring element. 
Non-zero values give\n the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`.\n For full structural elements use torch.ones_like(structural_element).\n structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat\n structuring element.\n origin: Origin of the structuring element. Default: ``None`` and uses the center of\n the structuring element as origin (rounding towards zero).\n border_type: It determines how the image borders are handled, where ``border_value`` is the value\n when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are\n outside the image when applying the operation.\n border_value: Value to fill past edges of input if ``border_type`` is ``constant``.\n max_val: The value of the infinite elements in the kernel.\n engine: convolution is faster and less memory hungry, and unfold is more stable numerically\n Returns:\n Dilated image with shape :math:`(B, C, H, W)`.\n .. note::\n See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/\n morphology_101.html>`__.\n Example:\n >>> tensor = torch.rand(1, 3, 5, 5)\n >>> kernel = torch.ones(3, 3)\n >>> dilated_img = dilation(tensor, kernel)\n \"\"\"\n\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(tensor)}\")\n\n if len(tensor.shape) != 4:\n raise ValueError(f\"Input size must have 4 dimensions. Got {tensor.dim()}\")\n\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(f\"Kernel type is not a torch.Tensor. Got {type(kernel)}\")\n\n if len(kernel.shape) != 2:\n raise ValueError(f\"Kernel size must have 2 dimensions. Got {kernel.dim()}\")\n\n # origin\n se_h, se_w = kernel.shape\n if origin is None:\n origin = [se_h // 2, se_w // 2]\n\n # pad\n pad_e: List[int] = [origin[1], se_w - origin[1] - 1, origin[0], se_h - origin[0] - 1]\n if border_type == 'geodesic':\n border_value = -max_val\n border_type = 'constant'\n output: torch.Tensor = F.pad(tensor, pad_e, mode=border_type, value=border_value)\n\n # computation\n if structuring_element is None:\n neighborhood = torch.zeros_like(kernel)\n neighborhood[kernel == 0] = -max_val\n else:\n neighborhood = structuring_element.clone()\n neighborhood[kernel == 0] = -max_val\n\n if engine == 'unfold':\n output = output.unfold(2, se_h, 1).unfold(3, se_w, 1)\n output, _ = torch.max(output + neighborhood.flip((0, 1)), 4)\n output, _ = torch.max(output, 4)\n elif engine == 'convolution':\n B, C, H, W = tensor.size()\n h_pad, w_pad = output.shape[-2:]\n reshape_kernel = _neight2channels_like_kernel(kernel)\n output, _ = F.conv2d(\n output.view(B * C, 1, h_pad, w_pad), reshape_kernel, padding=0, bias=neighborhood.view(-1).flip(0)\n ).max(dim=1)\n output = output.view(B, C, H, W)\n else:\n raise NotImplementedError(f\"engine {engine} is unknown, use 'convolution' or 'unfold'\")\n return output.view_as(tensor)" }, { "identifier": "erosion", "path": "utils/morphology.py", "snippet": "def erosion(\n tensor: torch.Tensor,\n kernel: torch.Tensor,\n structuring_element: Optional[torch.Tensor] = None,\n origin: Optional[List[int]] = None,\n border_type: str = 'geodesic',\n border_value: float = 0.0,\n max_val: float = 1e4,\n engine: str = 'unfold',\n) -> torch.Tensor:\n r\"\"\"Return the eroded image applying the same kernel in each channel.\n .. 
image:: _static/img/erosion.png\n The kernel must have 2 dimensions.\n Args:\n tensor: Image with shape :math:`(B, C, H, W)`.\n kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give\n the set of neighbors of the center over which the operation is applied. Its shape is :math:`(k_x, k_y)`.\n For full structural elements use torch.ones_like(structural_element).\n structuring_element (torch.Tensor, optional): Structuring element used for the grayscale dilation.\n It may be a non-flat structuring element.\n origin: Origin of the structuring element. Default: ``None`` and uses the center of\n the structuring element as origin (rounding towards zero).\n border_type: It determines how the image borders are handled, where ``border_value`` is the value\n when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are\n outside the image when applying the operation.\n border_value: Value to fill past edges of input if border_type is ``constant``.\n max_val: The value of the infinite elements in the kernel.\n engine: ``convolution`` is faster and less memory hungry, and ``unfold`` is more stable numerically\n Returns:\n Eroded image with shape :math:`(B, C, H, W)`.\n .. note::\n See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/\n morphology_101.html>`__.\n Example:\n >>> tensor = torch.rand(1, 3, 5, 5)\n >>> kernel = torch.ones(5, 5)\n >>> output = erosion(tensor, kernel)\n \"\"\"\n\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(tensor)}\")\n\n if len(tensor.shape) != 4:\n raise ValueError(f\"Input size must have 4 dimensions. Got {tensor.dim()}\")\n\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(f\"Kernel type is not a torch.Tensor. Got {type(kernel)}\")\n\n if len(kernel.shape) != 2:\n raise ValueError(f\"Kernel size must have 2 dimensions. 
Got {kernel.dim()}\")\n\n # origin\n se_h, se_w = kernel.shape\n if origin is None:\n origin = [se_h // 2, se_w // 2]\n\n # pad\n pad_e: List[int] = [origin[1], se_w - origin[1] - 1, origin[0], se_h - origin[0] - 1]\n if border_type == 'geodesic':\n border_value = max_val\n border_type = 'constant'\n output: torch.Tensor = F.pad(tensor, pad_e, mode=border_type, value=border_value)\n\n # computation\n if structuring_element is None:\n neighborhood = torch.zeros_like(kernel)\n neighborhood[kernel == 0] = -max_val\n else:\n neighborhood = structuring_element.clone()\n neighborhood[kernel == 0] = -max_val\n\n if engine == 'unfold':\n output = output.unfold(2, se_h, 1).unfold(3, se_w, 1)\n output, _ = torch.min(output - neighborhood, 4)\n output, _ = torch.min(output, 4)\n elif engine == 'convolution':\n B, C, H, W = tensor.size()\n Hpad, Wpad = output.shape[-2:]\n reshape_kernel = _neight2channels_like_kernel(kernel)\n output, _ = F.conv2d(output.view(B * C, 1, Hpad, Wpad),\n reshape_kernel,\n padding=0,\n bias=-neighborhood.view(-1)).min(dim=1)\n output = output.view(B, C, H, W)\n else:\n raise NotImplementedError(f\"engine {engine} is unknown, use 'convolution' or 'unfold'\")\n\n return output" }, { "identifier": "dialate_mask", "path": "training/video_swap_ft_coach.py", "snippet": "def dialate_mask(mask, img, radius=3, verbose=False):\n \"\"\"\n Dilate the mask a bit.\n \n The input mask must use the 12 classes; img is a PIL image (1024 resolution).\n \"\"\"\n \n # find the face region\n hair_bg_mask = np.stack([np.equal(mask, clz) for clz in [0,4,7,8,11]], axis=0).any(axis=0)\n face_mask = np.logical_not(hair_bg_mask)\n \n # dilate it\n kernel_size = (radius * 2 + 1, radius * 2 + 1)\n kernel = np.ones(kernel_size)\n dilated_face_mask = cv2.dilate((255*face_mask).astype(np.uint8), kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)\n \n dilated_mask = np.zeros_like(mask)\n dilated_mask[np.equal(dilated_face_mask, 255)] = mask[np.equal(dilated_face_mask, 255)]\n \n dilated_region = dilated_face_mask - (255*face_mask).astype(np.uint8)\n dilated_mask[np.equal(dilated_region, 255)] = 6 # fill the expanded region with skin\n \n # TODO visualize the dilated mask against the original mask\n if verbose:\n orig_mask_vis = vis_parsing_maps(img, mask)\n dilated_mask_vis = vis_parsing_maps(img, dilated_mask)\n comp = Image.fromarray(np.hstack([orig_mask_vis, dilated_mask_vis]))\n return dilated_mask, comp\n \n return dilated_mask, None" }, { "identifier": "erode_mask", "path": "training/video_swap_ft_coach.py", "snippet": "def erode_mask(mask, img, radius=3, verbose=False):\n \"\"\"\n Erode the mask a bit.\n \n The input mask must use the 12 classes; img is a PIL image (1024 resolution).\n \"\"\"\n \n # # find the face region\n hair_bg_mask = np.stack([np.equal(mask, clz) for clz in [0,4,11]], axis=0).any(axis=0)\n face_mask = np.logical_not(hair_bg_mask)\n \n # find the face region\n # face_mask = np.equal(mask, 6) \n \n # erode it\n kernel_size = (radius * 2 + 1, radius * 2 + 1)\n kernel = np.ones(kernel_size)\n eroded_face_mask = cv2.erode((255*face_mask).astype(np.uint8), kernel, borderType=cv2.BORDER_CONSTANT, borderValue=0)\n \n eroded_mask = np.zeros_like(mask)\n eroded_mask[np.equal(eroded_face_mask, 255)] = mask[np.equal(eroded_face_mask, 255)]\n \n # TODO visualize the eroded mask against the original mask\n if verbose:\n orig_mask_vis = vis_parsing_maps(img, mask)\n eroded_mask_vis = vis_parsing_maps(img, eroded_mask)\n comp = Image.fromarray(np.hstack([orig_mask_vis, eroded_mask_vis]))\n return eroded_mask, comp\n \n return eroded_mask, None" }, { "identifier": "blending", "path": "swap_face_fine/multi_band_blending.py", "snippet": "def blending(full_img, ori_img, 
mask):\n height, width = ori_img.shape[:2]\n\n mask_sharp = 1\n \n \"\"\"\n try:\n new_h = 2 ** (int(np.log2(height)) + 1)\n new_w = 2 ** (int(np.log2(width)) + 1)\n full_img, ori_img, full_mask = [cv2.resize(x, (new_h, new_w)) for x in (full_img, ori_img, np.float32(mask_sharp * mask))]\n # full_img = cv2.convertScaleAbs(ori_img*(1-full_mask) + full_img*full_mask)\n img = Laplacian_Pyramid_Blending_with_mask(full_img, ori_img, full_mask, 10)\n except:\n \"\"\"\n new_h = 1024\n new_w = 1024\n full_img, ori_img, full_mask = [cv2.resize(x, (new_h, new_w)) for x in (full_img, ori_img, np.float32(mask_sharp * mask))]\n # full_img = cv2.convertScaleAbs(ori_img*(1-full_mask) + full_img*full_mask)\n img = Laplacian_Pyramid_Blending_with_mask(full_img, ori_img, full_mask, 10)\n\n ### img in [0, 255]\n img = np.clip(img, 0 ,255)\n img = np.uint8(cv2.resize(img, (width, height)))\n return img" } ]
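The blending entry above calls Laplacian_Pyramid_Blending_with_mask, which this record does not include. A common implementation of that multi-band recipe looks roughly like the sketch below; it is a generic reconstruction, not the repo's code, and it assumes A, B and mask share the same HxWx3 float layout that blending() prepares.

# Sketch of a Laplacian-pyramid blend with a mask (assumed HxWx3, values in [0,1]).
import cv2
import numpy as np

def laplacian_pyramid_blend(A, B, mask, num_levels=10):
    GA, GB, GM = [A.astype(np.float32)], [B.astype(np.float32)], [mask.astype(np.float32)]
    for _ in range(num_levels):  # Gaussian pyramids of both images and of the mask
        GA.append(cv2.pyrDown(GA[-1]))
        GB.append(cv2.pyrDown(GB[-1]))
        GM.append(cv2.pyrDown(GM[-1]))
    # Laplacian pyramids: band = level - upsampled(next coarser level)
    LA = [GA[i] - cv2.pyrUp(GA[i + 1], dstsize=GA[i].shape[1::-1]) for i in range(num_levels)]
    LB = [GB[i] - cv2.pyrUp(GB[i + 1], dstsize=GB[i].shape[1::-1]) for i in range(num_levels)]
    LA.append(GA[-1])
    LB.append(GB[-1])
    # blend each band with the matching mask level, then collapse the pyramid
    LS = [gm * la + (1.0 - gm) * lb for la, lb, gm in zip(LA, LB, GM)]
    out = LS[-1]
    for i in range(num_levels - 1, -1, -1):
        out = cv2.pyrUp(out, dstsize=LS[i].shape[1::-1]) + LS[i]
    return np.clip(out, 0, 255)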
import copy
import cv2
import torch
import numpy as np
import torchvision.transforms as transforms
import os
import glob
import torch.nn as nn
from PIL import Image
from models.networks import Net3
from datasets.dataset import get_transforms, TO_TENSOR, NORMALIZE
from utils import torch_utils
from tqdm import tqdm
from torch.nn import functional as F
from training.video_swap_ft_coach import VideoSwapPTICoach
from tqdm import trange
from options.our_swap_face_pipeline_options import OurSwapFacePipelineOptions
from swap_face_fine.swap_face_mask import swap_head_mask_revisit, swap_head_mask_hole_first
from utils.morphology import dilation, erosion
from training.video_swap_ft_coach import dialate_mask, erode_mask
from swap_face_fine.multi_band_blending import blending
from utils.alignment import crop_faces, calc_alignment_coefficients  # this import is buggy, most likely a dlib problem
from skimage.transform import resize  # this line is somewhat problematic
from swap_face_fine.face_vid2vid.drive_demo import init_facevid2vid_pretrained_model, drive_source_demo  # this line is somewhat problematic
from swap_face_fine.gpen.gpen_demo import init_gpen_pretrained_model, GPEN_demo
from swap_face_fine.face_parsing.face_parsing_demo import init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps
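Three of the imports above are flagged as fragile (dlib-related). A defensive pattern, shown purely as an assumption rather than the repo's actual code, is to defer them until first use:

# Illustrative guard for the fragile imports flagged above (not in the repo):
# defer the dlib-dependent alignment import until it is actually needed.
def _load_alignment():
    try:
        from utils.alignment import crop_faces, calc_alignment_coefficients
        return crop_faces, calc_alignment_coefficients
    except ImportError as e:  # typically a broken dlib build
        raise RuntimeError("Face alignment unavailable; check the dlib installation") from e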
14,816
# from training.video_swap_st_constraint import VideoSwapPTICoach
# from training.video_swap_stich_coach import VideoSwapStichingCoach


def create_masks(mask, operation='dilation', radius=0):
    temp = copy.deepcopy(mask)
    if operation == 'dilation':
        full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')
        border_mask = full_mask - temp
    elif operation == 'erosion':
# from training.video_swap_st_constraint import VideoSwapPTICoach
# from training.video_swap_stich_coach import VideoSwapStichingCoach


def create_masks(mask, operation='dilation', radius=0):
    temp = copy.deepcopy(mask)
    if operation == 'dilation':
        full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')
        border_mask = full_mask - temp
    elif operation == 'erosion':
full_mask = erosion(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')
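The cropped code breaks off inside the erosion branch, and the next_line field above supplies only the missing erosion call. Put together, the function plausibly continues as sketched below; the border_mask and return lines are assumptions inferred from the dilation branch, not confirmed by the record.

# Assumed completion of create_masks (only the erosion call is confirmed by next_line):
def create_masks_sketch(mask, operation='dilation', radius=0):
    temp = copy.deepcopy(mask)
    if operation == 'dilation':
        full_mask = dilation(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')
        border_mask = full_mask - temp
    elif operation == 'erosion':
        full_mask = erosion(temp, torch.ones(2 * radius + 1, 2 * radius + 1, device=mask.device), engine='convolution')
        border_mask = temp - full_mask  # assumed: the band removed by erosion
    return full_mask, border_mask       # assumed return signature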
10
2023-10-15 12:15:01+00:00
24k
sotopia-lab/sotopia
sotopia/server.py
[ { "identifier": "Agents", "path": "sotopia/agents/llm_agent.py", "snippet": "class Agents(dict[str, BaseAgent[Observation, AgentAction]]):\n def reset(self) -> None:\n for agent in self.values():\n agent.reset()\n\n def act(self, obs: dict[str, Observation]) -> dict[str, AgentAction]:\n return {\n agent_name: agent.act(obs[agent_name])\n for agent_name, agent in self.items()\n }" }, { "identifier": "HumanAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class HumanAgent(BaseAgent[Observation, AgentAction]):\n \"\"\"\n A human agent that takes input from the command line.\n \"\"\"\n\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n goal = input(\"Goal: \")\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n print(\"Available actions:\")\n for i, action in enumerate(obs.available_actions):\n print(f\"{i}: {action}\")\n\n action_type = obs.available_actions[int(input(\"Action type: \"))]\n argument = input(\"Argument: \")\n\n return AgentAction(action_type=action_type, argument=argument)\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n print(\"Available actions:\")\n for i, action in enumerate(obs.available_actions):\n print(f\"{i}: {action}\")\n\n if obs.available_actions != [\"none\"]:\n action_type_number = await ainput(\n \"Action type (Please only input the number): \"\n )\n try:\n action_type_number = int(action_type_number) # type: ignore\n except:\n print(\"Please input a number.\")\n action_type_number = await ainput(\n \"Action type (Please only input the number): \"\n )\n action_type_number = int(action_type_number) # type: ignore\n assert isinstance(\n action_type_number, int\n ), \"Please input a number.\"\n action_type = obs.available_actions[action_type_number]\n else:\n action_type = \"none\"\n if action_type in [\"speak\", \"non-verbal communication\"]:\n argument = await ainput(\"Argument: \")\n else:\n argument = \"\"\n\n return AgentAction(action_type=action_type, argument=argument)" }, { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if 
len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "ScriptWritingAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class ScriptWritingAgent(LLMAgent):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n agent_names: list[str] = [],\n background: ScriptBackground | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.agent_names = agent_names\n assert background is not None, \"background cannot be None\"\n self.background = background\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n message_to_compose = [\n y for idx, (x, y) in enumerate(self.inbox) if idx != 0\n ]\n\n history = \"\\n\".join(\n f\"{y.to_natural_language()}\" for y in message_to_compose\n )\n print(\"Current agent: \", self.agent_name)\n print(\"Composed history: \", history)\n\n action, prompt = await agenerate_script(\n model_name=self.model_name,\n background=self.background,\n agent_names=self.agent_names,\n history=history,\n agent_name=self.agent_name,\n single_step=True,\n )\n # action: tuple[\n # list[list[tuple[str, str, Message]]], list[tuple[str, Message]]\n # ]\n returned_action = cast(AgentAction, action[1][0][1])\n print(\"Action: \", returned_action, type(returned_action))\n # print(\"Action: \", action)\n # exit(0)\n\n return returned_action" }, { "identifier": "SpeakAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class SpeakAgent(LLMAgent):\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action_speak,\n ) -> AgentAction:\n return super().act(obs, gen_func=gen_func)" }, { "identifier": "RedisAgent", "path": "sotopia/agents/redis_agent.py", "snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n \"\"\"An agent use redis as a message broker.\"\"\"\n\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n # super().__init__(agent_name=agent_name, uuid_str=uuid_str)\n self.session_id = session_id or str(uuid4())\n self.sender_id = str(uuid4())\n print(f\"session id: {self.session_id}\")\n print(\"step 1: connect to the server\")\n assert (\n \"FASTAPI_URL\" in os.environ\n ), \"To use redis agent, you have to launch a FastAPI 
server and set FASTAPI_URL\"\n self._URL = os.environ[\"FASTAPI_URL\"]\n response = requests.request(\n \"POST\",\n f\"{self._URL}/connect/{self.session_id}/server/{self.sender_id}\",\n )\n assert (\n response.status_code == 200 and response.text == \"[]\"\n ), \"Failed to connect to the server\"\n logging.info(f\"Session ID: {self.session_id}\")\n # logging.info(f\"Sender ID: {self.sender_id}\")\n\n def act(\n self,\n obs: Observation,\n ) -> AgentAction:\n raise NotImplementedError\n\n async def aact(\n self,\n obs: Observation,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n if obs.turn_number == 0:\n async with aiohttp.ClientSession() as session:\n print(\"step 2: post observation to the message list\")\n response = await session.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n data=obs.to_natural_language(),\n )\n assert response.status == 200, response\n sorted_message_list: list[tuple[float, str, str]] = list(\n map(\n lambda x: MessageTransaction.parse_obj(\n x\n ).to_tuple(),\n await response.json(),\n )\n )\n last_timestamp = sorted_message_list[-1][0]\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n async with aiohttp.ClientSession() as session:\n # 1. post observation to the message list\n response = await session.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n data=obs.to_natural_language(),\n )\n assert response.status == 200, response\n sorted_message_list = list(\n map(\n lambda x: MessageTransaction.parse_obj(x).to_tuple(),\n await response.json(),\n )\n )\n last_timestamp = sorted_message_list[-1][0]\n\n print(\"step 2: unlock the server for the client\")\n # 2. unlock the server for the client\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/action\",\n )\n assert response.status == 200, response\n\n print(\"step 3: wait for the client to post their message\")\n # 3. wait for the client to post their message\n for _ in range(300):\n response = await session.request(\n \"GET\",\n f\"{self._URL}/get/{self.session_id}\",\n )\n # print(f\"get response: {response}\")\n assert response.status == 200, response\n sorted_message_list = list(\n map(\n lambda x: MessageTransaction.parse_obj(\n x\n ).to_tuple(),\n await response.json(),\n )\n )\n if (\n sorted_message_list[-1][0] > last_timestamp\n and sorted_message_list[-1][1] == \"client\"\n ):\n # 3.a if the client has posted their message, lock the server for the client\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/no%20action\",\n )\n assert response.status == 200, response\n break\n else:\n # 3.b if the client has not posted their message, wait for 0.1 second and retry\n await asyncio.sleep(1)\n else:\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/no%20action\",\n )\n self.reset(\n \"Someone has left or the conversation is too long.\"\n )\n return AgentAction(action_type=\"leave\", argument=\"\")\n action_string = sorted_message_list[-1][2]\n try:\n action = AgentAction.parse_raw(action_string)\n return action\n except pydantic.error_wrappers.ValidationError:\n logging.warn(\n \"Failed to parse action string {}. 
Fall back to speak\".format(\n action_string\n )\n )\n return AgentAction(\n action_type=\"speak\", argument=sorted_message_list[-1][2]\n )\n\n def reset(\n self,\n reset_reason: str = \"\",\n ) -> None:\n super().reset()\n try:\n if reset_reason != \"\":\n response = requests.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n json=reset_reason,\n )\n assert response.status_code == 200\n\n except Exception as e:\n logging.error(f\"Failed to reset RedisAgent {self.sender_id}: {e}\")" }, { "identifier": "BaseAgent", "path": "sotopia/agents/base_agent.py", "snippet": "class BaseAgent(Generic[ObsType, ActType], MessengerMixin):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n MessengerMixin.__init__(self)\n if agent_profile is not None:\n self.profile = agent_profile\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = AgentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n else:\n assert (\n agent_name is not None\n ), \"Either agent_name or uuid_str must be provided\"\n self.agent_name = agent_name\n\n self._goal: str | None = None\n\n @property\n def goal(self) -> str:\n assert (\n self._goal is not None\n ), \"attribute goal has to be set before use\"\n return self._goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n async def aact(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n def reset(self) -> None:\n self.reset_inbox()" }, { "identifier": "EpisodeLog", "path": "sotopia/database/logs.py", "snippet": "class EpisodeLog(JsonModel):\n # Note that we did not validate the following constraints:\n # 1. The number of turns in messages and rewards should be the same or off by 1\n # 2. 
The agents in the messages are the same as the agents\n\n environment: str = Field(index=True)\n agents: list[str] = Field(index=True)\n tag: str | None = Field(index=True)\n models: list[str] | None = Field(index=True)\n messages: list[list[tuple[str, str, str]]] # Messages arranged by turn\n reasoning: str\n rewards: list[\n tuple[float, dict[str, float]] | float\n ] # Rewards arranged by turn\n rewards_prompt: str\n\n @root_validator\n def agent_number_message_number_reward_number_turn_number_match(\n cls, values: Any\n ) -> Any:\n agents, _, reasoning, rewards = (\n values.get(\"agents\"),\n values.get(\"messages\"),\n values.get(\"reasoning\"),\n values.get(\"rewards\"),\n )\n agent_number = len(agents)\n\n assert (\n len(rewards) == agent_number\n ), f\"Number of agents in rewards {len(rewards)} and agents {agent_number} do not match\"\n return values\n\n def render_for_humans(self) -> tuple[list[AgentProfile], list[str]]:\n \"\"\"Generate a human readable version of the episode log.\n\n Returns:\n A tuple of (a list of agent_profiles, a list of str): The agent profiles, and the messages and rewards in each turn.\n \"\"\"\n\n agent_profiles = [\n AgentProfile.get(pk=uuid_str) for uuid_str in self.agents\n ]\n messages_and_rewards = []\n for idx, turn in enumerate(self.messages):\n messages_in_this_turn = []\n if idx == 0:\n assert (\n len(turn) >= 2\n ), \"The first turn should have at least environment messages\"\n messages_in_this_turn.append(turn[0][2])\n messages_in_this_turn.append(turn[1][2])\n for sender, receiver, message in turn:\n if receiver == \"Environment\":\n if sender != \"Environment\":\n if \"did nothing\" in message:\n continue\n else:\n if \"said:\" in message:\n messages_in_this_turn.append(\n f\"{sender} {message}\"\n )\n else:\n messages_in_this_turn.append(\n f\"{sender}: {message}\"\n )\n else:\n messages_in_this_turn.append(message)\n messages_and_rewards.append(\"\\n\".join(messages_in_this_turn))\n messages_and_rewards.append(f\"The reasoning is:\\n{self.reasoning}\")\n messages_and_rewards.append(\n f\"The rewards are:\\nAgent 1: {self.rewards[0]}\\nAgent 2: {self.rewards[1]}\"\n )\n return agent_profiles, messages_and_rewards" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n 
default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that require children (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. 
Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which needs to contain a ScriptBackground object. The background can be incomplete (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which needs to contain a ScriptBackground object. The background must be complete (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = 
model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n )\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. 
{e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "unweighted_aggregate_evaluate", "path": "sotopia/envs/evaluators.py", "snippet": "@beartype\ndef unweighted_aggregate_evaluate(\n responses: list[tuple[str, tuple[tuple[str, int | float | bool], str]]],\n) -> ScriptEnvironmentResponse:\n \"\"\"\n Aggregate the responses from the environment\n\n Args:\n responses (list[tuple[str, tuple[tuple[str, int | bool], str]]]): list of responses from the environment\n Each response is a tuple of (agent_name/environment, (response, reasoning))\n \"\"\"\n responses_dict: dict[\n str, list[tuple[tuple[str, int | float | bool], str]]\n ] = defaultdict(list)\n for response in responses:\n assert response[0] == \"environment\" or response[0].startswith(\"agent\")\n responses_dict[response[0]].append(response[1])\n\n environment_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n agent_1_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n agent_2_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n for k, v in responses_dict.items():\n if k == \"environment\":\n environment_responses = _reduce(v)\n else:\n if k == \"agent_1\":\n agent_1_responses = _reduce(v)\n elif k == \"agent_2\":\n agent_2_responses = _reduce(v)\n else:\n # TODO: supports more than two agents\n raise ValueError(f\"Only supports agent_1 and agent_2, got {k}\")\n\n comments = (\n (\n f\"Environment comments: {environment_responses[1]}\\n\"\n if environment_responses[1]\n else \"\"\n )\n + (\n f\"Agent 1 
comments:\\n{agent_1_responses[1]}\\n\"\n if agent_1_responses[1]\n else \"\"\n )\n + (\n f\"Agent 2 comments:\\n{agent_2_responses[1]}\\n\"\n if agent_2_responses[1]\n else \"\"\n )\n )\n if (\n \"terminated\" in environment_responses[0]\n and environment_responses[0][\"terminated\"]\n ):\n log.debug(f\"[green] The conversation is terminated. {response}\")\n return ScriptEnvironmentResponse(\n terminated=environment_responses[0][\"terminated\"]\n if \"terminated\" in environment_responses[0]\n else False,\n p1_rate=(\n agent_1_responses[0][\"overall_score\"]\n if \"overall_score\" in agent_1_responses[0]\n else 0,\n agent_1_responses[0],\n )\n if agent_1_responses != ({}, \"\")\n else None,\n p2_rate=(\n agent_2_responses[0][\"overall_score\"]\n if \"overall_score\" in agent_2_responses[0]\n else 0,\n agent_2_responses[0],\n )\n if agent_2_responses != ({}, \"\")\n else None,\n comments=comments,\n )" }, { "identifier": "LLM_Name", "path": "sotopia/generation_utils/generate.py", "snippet": "class EnvResponse(BaseModel):\nclass EnvResponsePydanticOutputParser(PydanticOutputParser[EnvResponse]):\nclass ListOfIntOutputParser(BaseOutputParser[list[int]]):\nclass ListOfStrOutputParser(BaseOutputParser[list[str]]):\nclass StrOutputParser(BaseOutputParser[str]):\nclass ScriptOutputParser(BaseOutputParser[ScriptInteractionReturnType]):\n def __init__(self, pydantic_object: Type[BaseModel] = EnvResponse) -> None:\n def parse(self, text: str) -> EnvResponse:\n def get_format_instructions(self) -> str:\n def __init__(\n self,\n number_of_int: int | None = None,\n range_of_int: tuple[int, int] | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[int]:\n def _type(self) -> str:\n def __init__(\n self,\n number_of_str: int | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[str]:\n def _type(self) -> str:\n def __init__(self) -> None:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> str:\n def _type(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> ScriptInteractionReturnType:\n def _type(self) -> str:\ndef _return_fixed_model_version(\n model_name: Literal[\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-turbo\"]\n) -> str:\ndef obtain_chain(\n model_name: LLM_Name,\n template: str,\n input_variables: list[str],\n temperature: float = 0.7,\n max_retries: int = 6,\n) -> LLMChain:\ndef format_bad_output_for_script(\n ill_formed_output: str,\n format_instructions: str,\n agents: list[str],\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef format_bad_output(\n ill_formed_output: str,\n format_instructions: str,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef generate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> OutputType:\nasync def agenerate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> tuple[OutputType, str]:\ndef generate_episode(\n model_name: LLM_Name,\n participants: str = \"Jack (a greedy person), Rose\",\n topic: str = \"lawsuit\",\n extra_info: str = \"\",\n) -> EnvResponse:\nasync def agenerate_env_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n 
examples: str = \"\",\n temperature: float = 0.7,\n) -> tuple[EnvironmentProfile, str]:\nasync def agenerate_relationship_profile(\n model_name: LLM_Name,\n agents_profiles: list[str],\n) -> tuple[RelationshipProfile, str]:\nasync def agenerate_enviroment_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n) -> tuple[EnvironmentProfile, str]:\ndef fill_in_background(\n model_name: LLM_Name,\n partial_background: ScriptBackground,\n) -> ScriptBackground:\ndef generate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\ndef generate_action_speak(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\nasync def agenerate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n temperature: float = 0.7,\n script_like: bool = False,\n) -> tuple[AgentAction, str]:\nasync def agenerate_script(\n model_name: LLM_Name,\n background: ScriptBackground,\n temperature: float = 0.7,\n agent_names: list[str] = [],\n agent_name: str = \"\",\n history: str = \"\",\n single_step: bool = False,\n) -> tuple[ScriptInteractionReturnType, str]:\ndef process_history(\n script: ScriptBackground | EnvResponse | dict[str, AgentAction]\n) -> str:\ndef generate_init_profile(\n model_name: LLM_Name, basic_info: dict[str, str]\n) -> str:\ndef convert_narratives(model_name: LLM_Name, narrative: str, text: str) -> str:\ndef generate_goal(model_name: LLM_Name, background: str) -> str:" }, { "identifier": "AgentAction", "path": "sotopia/messages/message_classes.py", "snippet": "class AgentAction(Message):\n action_type: ActionType = Field(\n description=\"whether to speak at this turn or choose to not do anything\"\n )\n argument: str = Field(\n description=\"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\"\n )\n\n def to_natural_language(self) -> str:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"" }, { "identifier": "Message", "path": "sotopia/messages/message_classes.py", "snippet": "class Message(BaseModel):\n \"\"\"\n An interface for messages.\n There is only one required method: to_natural_language\n \"\"\"\n\n def to_natural_language(self) -> str:\n raise NotImplementedError" }, { "identifier": "Observation", "path": "sotopia/messages/message_classes.py", "snippet": "class Observation(Message):\n last_turn: str = Field(description=\"the last turn of the conversation\")\n turn_number: int = Field(description=\"the turn number of the conversation\")\n available_actions: list[ActionType] = Field(\n description=\"the available actions\"\n )\n\n def to_natural_language(self) -> str:\n if self.turn_number == 0:\n return f\"\\n{self.last_turn}\\nConversation Starts:\\n\"\n else:\n return f\"Turn #{self.turn_number-1}: {self.last_turn}\\n\"" }, { "identifier": "ScriptBackground", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptBackground(Message):\n scenario: str = 
Field(description=\"scenario of the episode\")\n p1_name: str = Field(description=\"name of participant 1\")\n p2_name: str = Field(description=\"name of participant 2\")\n p1_background: str = Field(description=\"background of participant 1\")\n p2_background: str = Field(description=\"background of participant 2\")\n p1_goal: str = Field(description=\"goal of participant 1\")\n p2_goal: str = Field(description=\"goal of participant 2\")\n\n def to_natural_language(self) -> str:\n if self.p1_background and self.p2_background:\n return format_docstring(\n f\"\"\"Here is the context of this interaction:\n Scenario: {self.scenario}\n Participants: {self.p1_name} and {self.p2_name}\n {self.p1_name}'s background: {self.p1_background}\n {self.p2_name}'s background: {self.p2_background}\n {self.p1_name}'s goal: {self.p1_goal}\n {self.p2_name}'s goal: {self.p2_goal}\n \"\"\"\n )\n else:\n return format_docstring(\n f\"\"\"Here is the context of this interaction:\n Scenario: {self.scenario}\n Participants: {self.p1_name} and {self.p2_name}\n {self.p1_name}'s goal: {self.p1_goal}\n {self.p2_name}'s goal: {self.p2_goal}\n \"\"\"\n )" }, { "identifier": "ScriptEnvironmentResponse", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptEnvironmentResponse(Message):\n terminated: bool = Field(\n description=\"whether the conversation is terminated\",\n default_factory=lambda: False,\n )\n p1_rate: float | tuple[float, dict[str, float]] | None = Field(\n description=\"rating of participant 1, on the scale of 1 to 10\"\n )\n p2_rate: float | tuple[float, dict[str, float]] | None = Field(\n description=\"rating of participant 2, on the scale of 1 to 10\"\n )\n comments: str | None = Field(\n description=\"All of the comments supporting the termination and rating\"\n )\n\n def to_natural_language(self) -> str:\n reason_to_stop = format_docstring(\n f\"\"\"Environment response:\n {\"The conversation is terminated.\" if self.terminated else \"\"}\n {\"Rating of participant 1\" + str(self.p1_rate) if self.p1_rate is not None else \"\"}\n {\"Rating of participant 2\" + str(self.p2_rate) if self.p2_rate is not None else \"\"}\n {self.comments if self.comments is not None else \"\"}\n \"\"\"\n )\n clean_text = \"\"\n for line in reason_to_stop.split(\"\\n\"):\n if line.strip():\n clean_text += line + \"\\n\"\n return clean_text" }, { "identifier": "ScriptInteraction", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptInteraction(Message):\n interactions: str = Field(\n description=\"\"\"The interaction between the two participants in maximum 20 turns. Each turn is separated by a newline, and should only describe one agent. Following the structure:\n Turn #x\n [participant's name] [action] {argument for some actions}\n\n You can use different types of actions, but only use one in each turn. You should move other information into argument part. Below shows a python code snippet of the format for each action type:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"\n\n For example, the following is acceptable:\n Turn #x\n Oliver Thompson said: \"Hey Esmeralda, what's wrong? 
You seem upset.\"\n Turn #x\n Esmeralda Solis [action] moved closer\n Turn #x\n Oliver Thompson [non-verbal communication] smiled\n Turn #x\n Esmeralda Solis did nothing\n Turn #x\n Oliver Thompson left the conversation\n Turn #x\n Esmeralda Solis [action] leaned in and lowered her voice: \"Sorry\"\n\n And the following is not acceptable:\n Turn #1\n Oliver Thompson [speak] said: \"Hey Esmeralda, what's wrong? You seem upset.\"\n Turn #1\n Esmeralda Solis non-verbal communication moved closer\n \"\"\"\n )\n\n def to_natural_language(self) -> str:\n return self.interactions\n\n def parse(\n self, agent_names: list[str], background: str\n ) -> tuple[\n list[list[tuple[str, str, Message]]], list[tuple[str, Message]]\n ]:\n interaction = self.interactions\n # print(\"Interaction: \", interaction)\n lines = self.split_by_turn(interaction)\n\n agent_results = []\n results: list[list[tuple[str, str, Message]]] = [\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=background,\n turn_number=0,\n available_actions=[\"none\"],\n ),\n )\n for name in agent_names\n ]\n ]\n\n for line_idx, line in enumerate(lines):\n try:\n res = self.parse_single_dialogue(line)\n action: AgentAction = cast(AgentAction, res[\"action\"])\n argument: str = cast(str, res[\"argument\"])\n turn: int = cast(int, res[\"turn\"])\n name: str = cast(str, res[\"name\"])\n\n parsed_action = AgentAction(\n action_type=action, argument=argument\n )\n if name not in agent_names:\n print(\n f\"The name of the agent, {name}, is not in the list of agent names, {agent_names}\"\n )\n name = agent_names[\n line_idx % 2\n ] # TODO Not sure what name to be set here\n except Exception as e:\n print(\n f\"Error when parsing the dialogue: {line}\",\n f\"The error is: {e}\",\n )\n raise e\n parsed_action = AgentAction(action_type=\"none\", argument=\"\")\n name = agent_names[line_idx % 2] # TODO same question as above\n inactive_agent_name = (\n agent_names[0] if name == agent_names[1] else agent_names[1]\n )\n results.append(\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=\"environment is the agent\",\n turn_number=line_idx + 1,\n available_actions=[\"none\"],\n ),\n )\n for name in agent_names\n ]\n + [\n (name, \"Environment\", parsed_action),\n (\n inactive_agent_name,\n \"Environment\",\n AgentAction(\n action_type=\"none\", argument=\"did nothing\"\n ),\n ),\n ]\n )\n\n agent_results.append((name, parsed_action))\n # print(\"Parsed agent results: \", agent_results)\n return (results, agent_results) # type: ignore\n\n def parse_single_dialogue(\n self, dialogue: str\n ) -> dict[str, str | int | AgentAction | None]:\n \"\"\"Parse a single dialogue string and return a dictionary with turn, name, action, and argument.\"\"\"\n\n # Match the turn number and name. 
Assume all agent name starts with a capital letter and is followed by lowercase letters\n match_turn_name = re.match(\n r\"Turn #?(\\d+):?\\s*\\n((?:[A-Z]['a-z]* ?)+)\", dialogue\n )\n\n if not match_turn_name:\n raise ValueError(\n f\"The dialogue does not match the expected format: {dialogue}\"\n )\n return (\n None # TODO Which should we use, return None or raise error?\n )\n\n turn, name = match_turn_name.groups()\n action_content = dialogue[\n len(match_turn_name.group(0)) :\n ].strip() # Extract the action content\n\n # Check for different action types\n if \"did nothing\" in action_content:\n action, argument = \"none\", \"\"\n elif match := re.match(r'said: \"(.*?)\"', action_content):\n action, argument = \"speak\", match.group(1)\n action, argument = action.strip(), argument.strip()\n elif match := re.match(r'\\[speak\\] said: \"(.*?)\"', action_content):\n action, argument = \"speak\", match.group(1)\n action, argument = action.strip(), argument.strip()\n elif match := re.match(\n r\"\\[(non-verbal communication|action)\\] (.*)\", action_content\n ):\n action, argument = match.groups()\n elif \"left the conversation\" in action_content:\n # TODO Make it more elegant to handle the situation of `left the conversation.`\n action, argument = \"leave\", \"\"\n else:\n action, argument = None, None\n\n parsed_item = {\n \"turn\": int(turn),\n \"name\": name.strip(),\n \"action\": action,\n \"argument\": argument,\n }\n return parsed_item\n\n def split_by_turn(self, input_string: str) -> list[str]:\n \"\"\"Split the input dialogue string by turn and return a list of dialogues.\"\"\"\n # Split using 'Turn #' as delimiter, but keep the delimiter in the results\n dialogues = re.split(r\"(?=Turn #?\\d+)\", input_string)\n # Remove any empty strings and strip whitespace\n dialogues = [\n dialogue.strip() for dialogue in dialogues if dialogue.strip()\n ]\n dialogues = [\n dialogue for dialogue in dialogues if dialogue.startswith(\"Turn\")\n ]\n # Change from Turn #x to Turn (#)x (# is optional)\n dialogues[-1] = \"\\n\".join(\n dialogues[-1].split(\"\\n\")[:2]\n ) # Discard further input in the last turn\n # print(\"Dialogues: \", dialogues)\n return dialogues\n\n @staticmethod\n def default_value_for_return_type() -> ScriptInteractionReturnType:\n results_1: list[list[tuple[str, str, Message]]] = [\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=\"Environment is the agent\",\n turn_number=0,\n available_actions=[\"none\"],\n ),\n )\n for name in [\"none\", \"none\"]\n ]\n ]\n results_2: list[tuple[str, Message]] = [\n (\"\", AgentAction(action_type=\"none\", argument=\"\"))\n ]\n return (results_1, results_2)" }, { "identifier": "BaseSampler", "path": "sotopia/samplers/base_sampler.py", "snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:" }, { "identifier": "ConstraintBasedSampler", "path": "sotopia/samplers/constraint_based_sampler.py", "snippet": "class ConstraintBasedSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, 
ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 10,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and a list of agents based on the constraints of the environment.\n\n Note: Sampling without replacement is only restricted to single env candidate.\n This is due to the fact that the number of possible combinations of env and agents is huge.\n Please sample for each env separately if you want to sample without replacement.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n env_profiles: list[EnvironmentProfile] = []\n agents_which_fit_scenario: list[list[str]] = []\n\n agent_candidate_ids: set[str] | None = None\n if self.agent_candidates:\n agent_candidate_ids = set(\n str(agent.pk) if not isinstance(agent, str) else agent\n for agent in self.agent_candidates\n )\n else:\n agent_candidate_ids = None\n\n if not replacement:\n assert self.env_candidates and len(self.env_candidates) == 1, (\n \"Sampling without replacement is only restricted to single env candidate (must be provided in the constructor). \"\n \"This is due to the fact that the number of possible combinations of env and agents is huge. \"\n \"Please sample for each env separately if you want to sample without replacement.\"\n )\n\n env_profile_id = (\n self.env_candidates[0].pk\n if not isinstance(self.env_candidates[0], str)\n else self.env_candidates[0]\n )\n\n assert env_profile_id, \"Env candidate must have an id\"\n\n agents_which_fit_scenario = _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, size\n )\n env_profiles = (\n [EnvironmentProfile.get(env_profile_id)] * size\n if isinstance(self.env_candidates[0], str)\n else [self.env_candidates[0]] * size\n )\n else:\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env_profiles.append(env_profile)\n env_profile_id = env_profile.pk\n assert env_profile_id, \"Env candidate must have an id\"\n agents_which_fit_scenario.append(\n _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, 1\n )[0]\n )\n\n assert (\n len(env_profiles) == size\n ), \"Number of env_profiles is not equal to size\"\n assert (\n len(agents_which_fit_scenario) == size\n ), \"Number of agents_which_fit_scenario is not equal to size\"\n\n for env_profile, agent_profile_id_list in zip(\n env_profiles, agents_which_fit_scenario\n ):\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n agent_profiles = [\n AgentProfile.get(id) for id in agent_profile_id_list\n ]\n\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" }, { 
"identifier": "UniformSampler", "path": "sotopia/samplers/uniform_sampler.py", "snippet": "class UniformSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and `n_agent` agents.\n\n Runtime checks:\n 1. If `agent_classes` is a list, it should have length `n_agent`.\n 2. `agents_params` should also be a list of length `n_agent`.\n\n Note: Currently, uniform sampling without replacement is not supported.\n This is due to the difficulty of sequentially sampling environment and agents.\n In theory, we can reject samples that have been sampled before, but this is not efficient.\n Please open an issue if you need this feature.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n assert (\n replacement\n ), \"Uniform sampling without replacement is not supported yet\"\n\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n\n if self.agent_candidates:\n agent_profile_candidates = self.agent_candidates\n if len(agent_profile_candidates) < n_agent:\n raise ValueError(\n f\"Number of agent candidates ({len(agent_profile_candidates)}) is less than number of agents ({n_agent})\"\n )\n else:\n agent_profile_candidates_keys = list(AgentProfile.all_pks())\n if len(agent_profile_candidates_keys) < n_agent:\n raise ValueError(\n f\"Number of agent profile candidates ({len(agent_profile_candidates_keys)}) in database is less than number of agents ({n_agent})\"\n )\n agent_profile_candidates = [\n AgentProfile.get(pk=pk)\n for pk in agent_profile_candidates_keys\n ]\n\n if len(agent_profile_candidates) == n_agent:\n agent_profiles_maybe_id = agent_profile_candidates\n else:\n agent_profiles_maybe_id = random.sample(\n agent_profile_candidates, n_agent\n )\n agent_profiles = [\n i if isinstance(i, AgentProfile) else AgentProfile.get(i)\n for i in agent_profiles_maybe_id\n ]\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" } ]
import asyncio import functools import itertools import logging import gin import rich from typing import Callable, Literal, Sequence, Type, cast from beartype import beartype from tqdm.asyncio import tqdm_asyncio from sotopia.agents import ( Agents, HumanAgent, LLMAgent, RedisAgent, ScriptWritingAgent, SpeakAgent, ) from sotopia.agents.base_agent import BaseAgent from sotopia.database import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs import ParallelSotopiaEnv from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, unweighted_aggregate_evaluate, ) from sotopia.generation_utils.generate import LLM_Name, agenerate_script from sotopia.messages import AgentAction, Message, Observation from sotopia.messages.message_classes import ( ScriptBackground, ScriptEnvironmentResponse, ScriptInteraction, ) from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, UniformSampler, )
18,296
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else:
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else:
agents[agent_name] = LLMAgent(agent_name, model_name=agent_model)
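Illustrative note: the gold next_line above completes the else branch of the agent dispatch in run_sync_server ("human" maps to HumanAgent, mode == "speak" maps to SpeakAgent, everything else to LLMAgent). Below is a hedged, standalone sketch of that rule; build_agents is a hypothetical helper for illustration, not part of the sotopia API.

from sotopia.agents import Agents, HumanAgent, LLMAgent, SpeakAgent

def build_agents(agent_names, agent_models, mode=None):
    # hypothetical helper mirroring the dispatch shown in the record above
    agents = Agents()
    for agent_name, agent_model in zip(agent_names, agent_models):
        if agent_model == "human":
            agents[agent_name] = HumanAgent(agent_name)
        elif mode == "speak":
            agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model)
        else:
            agents[agent_name] = LLMAgent(agent_name, model_name=agent_model)  # the gold completion
    return agents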
2
2023-10-23 19:47:26+00:00
24k
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n : int\n Number of desired interpolation points.\n sx : ndarray or None\n Shift in x to evaluate at. If original data is f(x), interpolates to f(x + sx)\n dx : float\n Spacing of source points\n\n Returns\n -------\n fi : ndarray, shape(n, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft(f, axis=0)\n nx = c.shape[0]\n if sx is not None:\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n c = (c[None].T * sx).T\n c = jnp.moveaxis(c, 0, -1)\n pad = ((n - nx) // 2, n - nx - (n - nx) // 2)\n if nx % 2 != 0:\n pad = pad[::-1]\n c = jnp.fft.ifftshift(_pad_along_axis(jnp.fft.fftshift(c, axes=0), pad, axis=0))\n return jnp.fft.fft(c, axis=0).real" }, { "identifier": "fft_interp2d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=(\"n1\", \"n2\"))\ndef fft_interp2d(\n f: jax.Array,\n n1: int,\n n2: int,\n sx: jax.Array = None,\n sy: jax.Array = None,\n dx: float = 1.0,\n dy: float = 1.0,\n):\n \"\"\"Interpolation of a 2d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ny, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n1, n2 : int\n Number of desired interpolation points in x and y directions\n sx, sy : ndarray or None\n Shift in x and y to evaluate at. If original data is f(x,y), interpolates to\n f(x + sx, y + sy). Both must be provided or None\n dx, dy : float\n Spacing of source points in x and y\n\n Returns\n -------\n fi : ndarray, shape(n1, n2, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft2(f, axes=(0, 1))\n nx, ny = c.shape[:2]\n if (sx is not None) and (sy is not None):\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n sy = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(ny)[:, None] * sy / dy)\n c = (c[None].T * sx[None, :, :] * sy[:, None, :]).T\n c = jnp.moveaxis(c, 0, -1)\n padx = ((n1 - nx) // 2, n1 - nx - (n1 - nx) // 2)\n pady = ((n2 - ny) // 2, n2 - ny - (n2 - ny) // 2)\n if nx % 2 != 0:\n padx = padx[::-1]\n if ny % 2 != 0:\n pady = pady[::-1]\n\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=0), padx, axis=0), axes=0\n )\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=1), pady, axis=1), axes=1\n )\n\n return jnp.fft.fft2(c, axes=(0, 1)).real" }, { "identifier": "Interpolator1D", "path": "interpax/_spline.py", "snippet": "class Interpolator1D(eqx.Module):\n \"\"\"Convenience class for representing a 1D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. 
If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n ):\n x, f = map(jnp.asarray, (x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n\n self.derivs = {\"fx\": fx}\n\n def __call__(self, xq: jax.Array, dx: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n Query points where interpolation is desired\n dx : int >= 0\n Derivative to take.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp1d(\n xq,\n self.x,\n self.f,\n self.method,\n dx,\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator2D", "path": "interpax/_spline.py", "snippet": "class Interpolator2D(eqx.Module):\n \"\"\"Convenience class for representing a 2D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, f = map(jnp.asarray, (x, y, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.y = y\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n\n self.derivs = {\"fx\": fx, \"fy\": fy, \"fxy\": fxy}\n\n def __call__(self, xq: jax.Array, yq: jax.Array, dx: int = 0, dy: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq : ndarray, shape(Nq,)\n x, y query points where interpolation is desired\n dx, dy : int >= 0\n Derivative to take in x, y directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp2d(\n xq,\n yq,\n self.x,\n self.y,\n self.f,\n self.method,\n (dx, dy),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator3D", "path": "interpax/_spline.py", "snippet": "class Interpolator3D(eqx.Module):\n \"\"\"Convenience class for representing a 3D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y, z directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n z: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, z, f = map(jnp.asarray, (x, y, z, f))\n axis = kwargs.get(\"axis\", 0)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n self.x = x\n self.y = y\n self.z = z\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n\n self.derivs = {\n \"fx\": fx,\n \"fy\": fy,\n \"fz\": fz,\n \"fxy\": fxy,\n \"fxz\": fxz,\n \"fyz\": fyz,\n \"fxyz\": fxyz,\n }\n\n def __call__(\n self,\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n dx: int = 0,\n dy: int = 0,\n dz: int = 0,\n ):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq, zq : ndarray, shape(Nq,)\n x, y, z query points where interpolation is desired\n dx, dy, dz : int >= 0\n Derivative to take in x, y, z directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp3d(\n xq,\n yq,\n zq,\n self.x,\n self.y,\n self.z,\n self.f,\n self.method,\n (dx, dy, dz),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "interp1d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp1d(\n xq: jax.Array,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 1d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n query points where interpolation is desired\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: 
linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n derivative : int >= 0\n derivative order to calculate\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, f data, recommend using Interpolator1D\n which caches the calculation of the derivatives and spline coefficients.\n\n \"\"\"\n xq, x, f = map(jnp.asarray, (xq, x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n outshape = xq.shape + f.shape[1:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq = jnp.atleast_1d(xq)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n lowx, highx = _parse_extrap(extrap, 1)\n\n if period is not None:\n xq, x, f, fx = _make_periodic(xq, x, period, axis, f, fx)\n lowx = highx = True\n\n if method == \"nearest\":\n\n def derivative0():\n i = jnp.argmin(jnp.abs(xq[:, np.newaxis] - x[np.newaxis]), axis=1)\n return f[i]\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1])\n\n elif method == \"linear\":\n\n def derivative0():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n delta = xq - x[i - 1]\n fq = jnp.where(\n (dx == 0),\n jnp.take(f, i, axis).T,\n jnp.take(f, i - 1, axis).T + (delta * dxi * df.T),\n ).T\n return fq\n\n def derivative1():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n return (df.T * dxi).T\n\n def derivative2():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1, derivative2])\n\n elif method in (CUBIC_METHODS + (\"monotonic\", \"monotonic-0\")):\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n assert fx.shape == f.shape\n\n dx = x[i] - x[i - 1]\n delta = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n t = delta * dxi\n\n f0 = jnp.take(f, i - 1, axis)\n f1 = jnp.take(f, i, axis)\n fx0 = (jnp.take(fx, i - 1, axis).T * dx).T\n fx1 = (jnp.take(fx, i, 
axis).T * dx).T\n\n F = jnp.stack([f0, f1, fx0, fx1], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_CUBIC, F).T\n ttx = _get_t_der(t, derivative, dxi)\n fq = jnp.einsum(\"ji...,ij->i...\", coef, ttx)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n return fq.reshape(outshape)" }, { "identifier": "interp2d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp2d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 2d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0 or array-like, shape(2,)\n derivative order to calculate in x, y. Use a single value for the same in both\n directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in both directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, f data, recommend using\n Interpolator2D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, x, y, f = map(jnp.asarray, (xq, yq, x, y, f))\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n xq, yq = jnp.broadcast_arrays(xq, yq)\n outshape = xq.shape + f.shape[2:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq = map(jnp.atleast_1d, (xq, yq))\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n periodx, periody = _parse_ndarg(period, 2)\n derivative_x, derivative_y = _parse_ndarg(derivative, 2)\n lowx, highx, lowy, highy = _parse_extrap(extrap, 2)\n\n if periodx is not None:\n xq, x, f, fx, fy, fxy = _make_periodic(xq, x, periodx, 0, f, fx, fy, fxy)\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fxy = _make_periodic(yq, y, periody, 1, f, fx, fy, fxy)\n lowy = highy = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 4 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n neighbors_x = jnp.array(\n [[x[i], x[i - 1], x[i], x[i - 1]], [y[j], y[j], y[j - 1], y[j - 1]]]\n )\n neighbors_f = jnp.array(\n [f[i, j].T, f[i - 1, j].T, f[i, j - 1].T, f[i - 1, j - 1].T]\n )\n xyq = jnp.array([xq, yq])\n dist = jnp.linalg.norm(neighbors_x - xyq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[2:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0), derivative0, derivative1\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n f00 = f[i - 1, j - 1]\n f01 = f[i - 1, j]\n f10 = f[i, j - 1]\n f11 = f[i, j]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n F = jnp.array([[f00, f01], [f10, f11]])\n fq = (dxi * dyi * jnp.einsum(\"ijk...,ik,jk->k...\", F, tx, ty).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = 
approx_df(y, fx, method, 1, **kwargs)\n assert fx.shape == fy.shape == fxy.shape == f.shape\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fxy\"] = fxy\n fsq = OrderedDict()\n for ff in fs.keys():\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_BICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, *coef.shape[1:]), order=\"F\"), 2, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n fq = jnp.einsum(\"ijk...,ij,ik->i...\", coef, ttx, tty)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n\n return fq.reshape(outshape)" }, { "identifier": "interp3d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp3d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 3d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n zq : ndarray, shape(Nq,)\n z query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0, array-like, shape(3,)\n derivative order to calculate in x,y,z directions. Use a single value for the\n same in all directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions for\n [[xlow, xhigh],[ylow,yhigh],[zlow,zhigh]]\n period : float > 0, None, array-like, shape(3,)\n periodicity of the function in x, y, z directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in all directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, z, f data, recommend using\n Interpolator3D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, zq, x, y, z, f = map(jnp.asarray, (xq, yq, zq, x, y, z, f))\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n xq, yq, zq = jnp.broadcast_arrays(xq, yq, zq)\n outshape = xq.shape + f.shape[3:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq, zq = map(jnp.atleast_1d, (xq, yq, zq))\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n periodx, periody, periodz = _parse_ndarg(period, 3)\n derivative_x, derivative_y, derivative_z = _parse_ndarg(derivative, 3)\n lowx, highx, lowy, highy, lowz, highz = _parse_extrap(extrap, 3)\n\n if periodx is not None:\n xq, x, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n xq, x, periodx, 0, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n yq, y, periody, 1, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowy = highy = True\n if periodz is not None:\n zq, z, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n zq, z, periodz, 2, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowz = highz = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 8 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n neighbors_x = jnp.array(\n [\n [x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1]],\n [y[j], y[j], y[j - 1], y[j - 1], y[j], y[j], y[j - 1], y[j - 1]],\n [z[k], z[k], z[k], z[k], z[k - 1], z[k - 1], z[k - 1], z[k - 1]],\n ]\n )\n neighbors_f = jnp.array(\n [\n f[i, j, k].T,\n f[i - 1, j, k].T,\n f[i, j - 1, k].T,\n f[i - 1, j - 1, k].T,\n f[i, j, k - 1].T,\n f[i - 1, j, k - 1].T,\n f[i, j - 1, k - 1].T,\n f[i - 1, j - 1, k - 1].T,\n ]\n )\n xyzq = jnp.array([xq, yq, zq])\n dist = jnp.linalg.norm(neighbors_x - xyzq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[3:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0) & (derivative_z == 0),\n derivative0,\n derivative1,\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k 
= jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n f000 = f[i - 1, j - 1, k - 1]\n f001 = f[i - 1, j - 1, k]\n f010 = f[i - 1, j, k - 1]\n f100 = f[i, j - 1, k - 1]\n f110 = f[i, j, k - 1]\n f011 = f[i - 1, j, k]\n f101 = f[i, j - 1, k]\n f111 = f[i, j, k]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n z0 = z[k - 1]\n z1 = z[k]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n dz = z1 - z0\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n dz0 = lambda: jnp.array([z1 - zq, zq - z0])\n dz1 = lambda: jnp.array([-jnp.ones_like(zq), jnp.ones_like(zq)])\n dz2 = lambda: jnp.zeros((2, zq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n tz = jax.lax.switch(derivative_z, [dz0, dz1, dz2])\n\n F = jnp.array([[[f000, f001], [f010, f011]], [[f100, f101], [f110, f111]]])\n fq = (dxi * dyi * dzi * jnp.einsum(\"lijk...,lk,ik,jk->k...\", F, tx, ty, tz).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n assert (\n fx.shape\n == fy.shape\n == fz.shape\n == fxy.shape\n == fxz.shape\n == fyz.shape\n == fxyz.shape\n == f.shape\n )\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n dz = z[k] - z[k - 1]\n deltaz = zq - z[k - 1]\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n tz = deltaz * dzi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fz\"] = fz\n fs[\"fxy\"] = fxy\n fs[\"fxz\"] = fxz\n fs[\"fyz\"] = fyz\n fs[\"fxyz\"] = fxyz\n fsq = OrderedDict()\n for ff in fs.keys():\n for kk in [0, 1]:\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj) + str(kk)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n if \"z\" in ff:\n fsq[s] = (dz * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_TRICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order=\"F\"), 3, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n ttz = _get_t_der(tz, derivative_z, dzi)\n fq = jnp.einsum(\"lijk...,li,lj,lk->l...\", coef, ttx, tty, ttz)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n fq = _extrap(zq, fq, z, lowz, highz)\n\n return fq.reshape(outshape)" } ]
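Illustrative note: the cubic branches quoted above all reduce to the same step, coef = A_CUBIC @ [f0, f1, dx*fx0, dx*fx1] followed by a dot product with powers of t. Below is a hedged NumPy sketch of that evaluation; the A_CUBIC layout shown is the standard cubic-Hermite basis matrix, an assumption, since the constant itself does not appear in the snippets.

import numpy as np

# maps [f0, f1, dx*fx0, dx*fx1] to the coefficients of 1, t, t^2, t^3
A_CUBIC = np.array(
    [
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [-3.0, 3.0, -2.0, -1.0],
        [2.0, -2.0, 1.0, 1.0],
    ]
)

def hermite_segment(t, f0, f1, m0, m1):
    """Evaluate one normalized spline segment at t in [0, 1]."""
    coef = A_CUBIC @ np.array([f0, f1, m0, m1])
    return coef @ np.array([1.0, t, t * t, t ** 3])

# sanity checks: endpoint values and endpoint slopes are reproduced
assert np.isclose(hermite_segment(0.0, 2.0, 5.0, 1.0, -1.0), 2.0)
assert np.isclose(hermite_segment(1.0, 2.0, 5.0, 1.0, -1.0), 5.0)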
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
16,854
y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs ) interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)( xq, yq, zq ) for interp in [interp1, interp2]: fq = interp(x, y, z, xp, yp, zp, fp) np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-5, atol=1e-2) fq = interp(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-2, atol=1) fq = interp(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-3, atol=1e-1) atol = 5.5e-3 rtol = 1e-5 fq = interp(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp3d_vector_valued(self): """Test for interpolating vector valued function.""" x = np.linspace(0, np.pi, 1000) y = np.linspace(0, 2 * np.pi, 1000) z = np.linspace(0, 3, 1000) xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.array( [np.sin(x) * np.cos(y) * z**2, 0.1 * (x + y - z)] ) fp = f(xxp.T, yyp.T, zzp.T).T fq = interp3d(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-2, atol=1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-3, atol=1e-1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-5, atol=5e-3) @pytest.mark.unit def test_fft_interp1d(): """Test for 1d Fourier interpolation.""" def fun(x): return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1 x = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f1 = {} for p in ["o", "e"]: f1[p] = {} for i in [1, 2]: f1[p][i] = fun(x[p][i]) for sp in ["o", "e"]: # source parity fi = f1[sp][1] fs = fun(x[sp][1] + 0.2) np.testing.assert_allclose(
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs) interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq) for interp in [interp1, interp2]: fq = interp(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x), rtol=1e-2, atol=1e-1) fq = interp(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 2 * np.pi, 100) x = np.linspace(0, 2 * np.pi, 300)[10:-10] f = lambda x: np.array([np.sin(x), np.cos(x)]) fp = f(xp).T fq = interp1d(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x).T, rtol=1e-2, atol=1e-1) fq = interp1d(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_extrap_periodic(self): """Test extrapolation and periodic BC of 1d interpolation.""" xp = np.linspace(0, 2 * np.pi, 200) x = np.linspace(-1, 2 * np.pi + 1, 10000) f = lambda x: np.sin(x) fp = f(xp) fq = interp1d(x, xp, fp, method="cubic", extrap=False) assert np.isnan(fq[0]) assert np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", extrap=True) assert not np.isnan(fq[0]) assert not np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", period=2 * np.pi) np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-2) @pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope 
assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs ) interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)( xq, yq, zq ) for interp in [interp1, interp2]: fq = interp(x, y, z, xp, yp, zp, fp) np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-5, atol=1e-2) fq = interp(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z), rtol=1e-2, atol=1) fq = interp(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, 
z), rtol=1e-3, atol=1e-1) atol = 5.5e-3 rtol = 1e-5 fq = interp(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) fq = interp(x, y, z, xp, yp, zp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x, y, z), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp3d_vector_valued(self): """Test for interpolating vector valued function.""" x = np.linspace(0, np.pi, 1000) y = np.linspace(0, 2 * np.pi, 1000) z = np.linspace(0, 3, 1000) xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.array( [np.sin(x) * np.cos(y) * z**2, 0.1 * (x + y - z)] ) fp = f(xxp.T, yyp.T, zzp.T).T fq = interp3d(x, y, z, xp, yp, zp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-2, atol=1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-3, atol=1e-1) fq = interp3d(x, y, z, xp, yp, zp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y, z).T, rtol=1e-5, atol=5e-3) @pytest.mark.unit def test_fft_interp1d(): """Test for 1d Fourier interpolation.""" def fun(x): return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1 x = {"o": {}, "e": {}} x["o"][1] = np.linspace(0, 2 * np.pi, 33, endpoint=False) x["e"][1] = np.linspace(0, 2 * np.pi, 32, endpoint=False) x["o"][2] = np.linspace(0, 2 * np.pi, 133, endpoint=False) x["e"][2] = np.linspace(0, 2 * np.pi, 132, endpoint=False) f1 = {} for p in ["o", "e"]: f1[p] = {} for i in [1, 2]: f1[p][i] = fun(x[p][i]) for sp in ["o", "e"]: # source parity fi = f1[sp][1] fs = fun(x[sp][1] + 0.2) np.testing.assert_allclose(
fs, fft_interp1d(fi, *fi.shape, sx=0.2, dx=np.diff(x[sp][1])[0]).squeeze()
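Illustrative note: the gold next_line above is the second argument of an assert_allclose comparing shifted samples against fft_interp1d. A hedged, standalone sketch of the same call pattern, following the fft_interp1d docstring quoted in the context (source data covers one full period, endpoint excluded); the tolerance is a conservative placeholder.

import numpy as np
from interpax import fft_interp1d

def fun(x):
    return 2 * np.sin(1 * x) + 4 * np.cos(3 * x) + 1

x = np.linspace(0, 2 * np.pi, 32, endpoint=False)  # one period, endpoint excluded
f = fun(x)
f64 = fft_interp1d(f, 64)  # band-limited resampling from 32 to 64 points
fshift = fft_interp1d(f, 32, sx=0.2, dx=np.diff(x)[0]).squeeze()  # samples of f(x + 0.2)
np.testing.assert_allclose(fshift, fun(x + 0.2), atol=1e-4)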
0
2023-10-18 13:12:20+00:00
24k
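Illustrative note on the interpax record above: a minimal usage sketch of interp1d as documented in its docstring and exercised by the test file; the knot counts and query grid are placeholders.

import jax.numpy as jnp
from interpax import interp1d

xp = jnp.linspace(0, 2 * jnp.pi, 100)  # knots
fp = jnp.sin(xp)                       # known function values
xq = jnp.linspace(0, 2 * jnp.pi, 37)   # query points
fq = interp1d(xq, xp, fp, method="cubic", period=2 * jnp.pi)  # periodic cubic spline
dfq = interp1d(xq, xp, fp, method="cubic", derivative=1)      # first derivative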
city96/ComfyUI_ExtraModels
PixArt/sampler.py
[ { "identifier": "gaussian_diffusion", "path": "PixArt/sampling/gaussian_diffusion.py", "snippet": "def mean_flat(tensor):\n def is_vb(self):\ndef _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):\ndef get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):\ndef get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n snr=False\n ):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_reverse_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n def _vb_terms_bpd(\n self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None\n ):\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):\n def _prior_bpd(self, x_start):\n def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]\n B, C = x_t.shape[:2]" }, { "identifier": "model_wrapper", "path": "PixArt/sampling/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n 
guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. \"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n\n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n ``\n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. \"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n\n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs)\n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. 
A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == 'discrete':\n return (t_continuous - 1. / noise_schedule.total_N) * 1000.\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(\n x = x,\n timesteps = t_input,\n context = None,\n y = None,\n **model_kwargs\n )\n else:\n output = model(\n x = x,\n timesteps = t_input,\n context = cond,\n y = None,\n **model_kwargs\n )\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n return (x - expand_dims(alpha_t, x.dim()) * output) / expand_dims(sigma_t, x.dim())\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n return expand_dims(alpha_t, x.dim()) * output + expand_dims(sigma_t, x.dim()) * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n return -expand_dims(sigma_t, x.dim()) * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * expand_dims(sigma_t, x.dim()) * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1. 
or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\", \"score\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "DPM_Solver", "path": "PixArt/sampling/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding_max_val=1.,\n dynamic_thresholding_ratio=0.995,\n ):\n \"\"\"Construct a DPM-Solver.\n\n We support both DPM-Solver (`algorithm_type=\"dpmsolver\"`) and DPM-Solver++ (`algorithm_type=\"dpmsolver++\"`).\n\n We also support the \"dynamic thresholding\" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type=\"dpmsolver++\"` and `correcting_x0_fn=\"dynamic_thresholding\"` to use the\n dynamic thresholding. The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either \"dpmsolver\" or \"dpmsolver++\".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn=\"dynamic_thresholding\"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))\n self.noise_schedule = noise_schedule\n assert algorithm_type in [\"dpmsolver\", \"dpmsolver++\"]\n self.algorithm_type = algorithm_type\n if correcting_x0_fn == \"dynamic_thresholding\":\n self.correcting_x0_fn = self.dynamic_thresholding_fn\n else:\n self.correcting_x0_fn = correcting_x0_fn\n self.correcting_xt_fn = correcting_xt_fn\n self.dynamic_thresholding_ratio = dynamic_thresholding_ratio\n self.thresholding_max_val = thresholding_max_val\n\n def dynamic_thresholding_fn(self, x0, t):\n \"\"\"\n The dynamic thresholding method.\n \"\"\"\n dims = x0.dim()\n p = self.dynamic_thresholding_ratio\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with corrector).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n x0 = (x - sigma_t * noise) / alpha_t\n if self.correcting_x0_fn is not None:\n x0 = self.correcting_x0_fn(x0, t)\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model.\n \"\"\"\n if self.algorithm_type == \"dpmsolver++\":\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == 'logSNR':\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == 'time_uniform':\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == 'time_quadratic':\n t_order = 2\n t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. 
/ t_order), N + 1).pow(t_order).to(device)\n return t\n else:\n raise ValueError(\n \"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(skip_type))\n\n def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3, ] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3, ] * (K - 1) + [1]\n else:\n orders = [3, ] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2, ] * K\n else:\n K = steps // 2 + 1\n orders = [2, ] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1, ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(torch.tensor([0, ] + orders), 0).to(device)]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.\n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. 
The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n sigma_t / sigma_s * x\n - alpha_t * phi_1 * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,\n solver_type='dpmsolver'):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(\n s1), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n (sigma_s1 / sigma_s) * x\n - (alpha_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1. 
/ r1) * (alpha_t * (phi_1 / h + 1.)) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n torch.exp(log_alpha_s1 - log_alpha_s) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpmsolver':\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (1. / r1) * (sigma_t * (phi_1 / h - 1.)) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,\n return_intermediate=False, solver_type='dpmsolver'):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 1. / 3.\n if r2 is None:\n r2 = 2. 
/ 3.\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(\n s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(\n s2), ns.marginal_std(t)\n alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n (sigma_s1 / sigma_s) * x\n - (alpha_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (sigma_s2 / sigma_s) * x\n - (alpha_s2 * phi_12) * model_s\n + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1. / r2) * (alpha_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n (torch.exp(log_alpha_s1 - log_alpha_s)) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (torch.exp(log_alpha_s2 - log_alpha_s)) * x\n - (sigma_s2 * phi_12) * model_s\n - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpmsolver':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (1. / r2) * (sigma_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. 
The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]\n t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(\n t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n - 0.5 * (alpha_t * phi_1) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * (phi_1 / h + 1.)) * D1_0\n )\n else:\n phi_1 = torch.expm1(h)\n if solver_type == 'dpmsolver':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - 0.5 * (sigma_t * phi_1) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * (phi_1 / h - 1.)) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(\n t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)\n D1_1 = (1. / r1) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)\n D2 = (1. 
/ (r0 + r1)) * (D1_0 - D1_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_1 = torch.expm1(h)\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None,\n r2=None):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,\n solver_type=solver_type, r1=r1)\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,\n solver_type=solver_type, r1=r1, r2=r2)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])\n elif order == 2:\n return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n elif order == 3:\n return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,\n solver_type='dpmsolver'):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the\n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((1,)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,\n solver_type=solver_type,\n **kwargs)\n elif order == 3:\n r1, r2 = 1. / 3., 2. 
/ 3.\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,\n return_intermediate=True,\n solver_type=solver_type)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,\n solver_type=solver_type,\n **kwargs)\n else:\n raise ValueError(\"For adaptive step size solver, order must be 2 or 3, got {}\".format(order))\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))\n norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)\n nfe += order\n print('adaptive solver nfe', nfe)\n return x\n\n def add_noise(self, x, t, noise=None):\n \"\"\"\n Compute the noised input xt = alpha_t * x + sigma_t * noise.\n\n Args:\n x: A `torch.Tensor` with shape `(batch_size, *shape)`.\n t: A `torch.Tensor` with shape `(t_size,)`.\n Returns:\n xt with shape `(t_size, batch_size, *shape)`.\n \"\"\"\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n if noise is None:\n noise = torch.randn((t.shape[0], *x.shape), device=x.device)\n x = x.reshape((-1, *x.shape))\n xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise\n if t.shape[0] == 1:\n return xt.squeeze(0)\n else:\n return xt\n\n def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',\n method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',\n atol=0.0078, rtol=0.05, return_intermediate=False,\n ):\n \"\"\"\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_start is None else t_start\n t_T = self.noise_schedule.T if t_end is None else t_end\n assert t_0 > 0 and t_T > 0, \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type,\n method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero,\n solver_type=solver_type,\n atol=atol, rtol=rtol, return_intermediate=return_intermediate)\n\n def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',\n method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',\n atol=0.0078, rtol=0.05, return_intermediate=False, latent_scale_factor=1.0, pbar=None, previewer=None,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. 
\"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver.\n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. 
\"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver or DPM-Solver++ (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g., DPM-Solver:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n e.g., DPM-Solver++:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `algorithm_type=\"dpmsolver++\"` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. 
As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n return_intermediate: A `bool`. Whether to save the xt at each step.\n When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n assert t_0 > 0 and t_T > 0, \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n if return_intermediate:\n assert method in ['multistep', 'singlestep',\n 'singlestep_fixed'], \"Cannot use adaptive solver when saving intermediate values\"\n if self.correcting_xt_fn is not None:\n assert method in ['multistep', 'singlestep',\n 'singlestep_fixed'], \"Cannot use adaptive solver when correcting_xt_fn is not None\"\n device = x.device\n intermediates = []\n with torch.no_grad():\n if method == 'adaptive':\n x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,\n solver_type=solver_type)\n elif method == 'multistep':\n assert steps >= order\n timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)\n assert timesteps.shape[0] - 1 == steps\n # Init the initial values.\n step = 0\n t = timesteps[step]\n t_prev_list = [t]\n model_prev_list = [self.model_fn(x, t)]\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n # Init the first `order` values by lower order multistep DPM-Solver.\n for step in range(1, order):\n t = timesteps[step]\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step,\n solver_type=solver_type)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n t_prev_list.append(t)\n model_prev_list.append(self.model_fn(x, t))\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in tqdm(range(order, steps + 1)):\n t = timesteps[step]\n # We only use lower order for steps < 10\n if lower_order_final and steps < 10:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order,\n solver_type=solver_type)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = model_prev_list[i + 1]\n t_prev_list[-1] = t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, t)\n # comfyui preview\n if 
pbar:\n preview_bytes = None\n if previewer:\n preview_bytes = previewer.decode_latent_to_preview_image(\"JPEG\", x)\n pbar.update_absolute(step, steps, preview_bytes)\n\n elif method in ['singlestep', 'singlestep_fixed']:\n if method == 'singlestep':\n timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps,\n order=order,\n skip_type=skip_type,\n t_T=t_T, t_0=t_0,\n device=device)\n elif method == 'singlestep_fixed':\n K = steps // order\n orders = [order, ] * K\n timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)\n for step, order in enumerate(orders):\n s, t = timesteps_outer[step], timesteps_outer[step + 1]\n timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order,\n device=device)\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n else:\n raise ValueError(\"Got wrong method {}\".format(method))\n if denoise_to_zero:\n t = torch.ones((1,)).to(device) * t_0\n x = self.denoise_to_zero_fn(x, t)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step + 1)\n if return_intermediate:\n intermediates.append(x)\n\n if return_intermediate:\n return x, intermediates\n else:\n return x" }, { "identifier": "NoiseScheduleVP", "path": "PixArt/sampling/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.,\n dtype=torch.float32,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. 
The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support the linear VPSDE for the continuous time setting. The hyperparameters for the noise\n schedule are the default settings in Yang Song's ScoreSDE:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,\n 'linear' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n\n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in ['discrete', 'linear']:\n raise ValueError(\n \"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear'\".format(schedule))\n\n self.schedule = schedule\n if schedule == 'discrete':\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.T = 1.\n self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, -1,)).to(dtype=dtype)\n self.total_N = self.log_alpha_array.shape[1]\n self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)\n else:\n self.T = 1.\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n\n def numerical_clip_alpha(self, log_alphas, clipped_lambda=-5.1):\n \"\"\"\n For some beta schedules such as cosine schedule, the log-SNR has numerical isssues.\n We clip the log-SNR near t=T within -5.1 to ensure the stability.\n Such a trick is very useful for diffusion models with the cosine schedule, such as i-DDPM, guided-diffusion and GLIDE.\n \"\"\"\n log_sigmas = 0.5 * torch.log(1. - torch.exp(2. 
* log_alphas))\n lambs = log_alphas - log_sigmas\n idx = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)\n if idx > 0:\n log_alphas = log_alphas[:-idx]\n return log_alphas\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == 'discrete':\n return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),\n self.log_alpha_array.to(t.device)).reshape((-1))\n elif self.schedule == 'linear':\n return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == 'linear':\n tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))\n Delta = self.beta_0 ** 2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == 'discrete':\n log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)\n t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),\n torch.flip(self.t_array.to(lamb.device), [1]))\n return t.reshape((-1,))" } ]
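The `model_wrapper` snippet above reduces every guidance mode to a plain noise prediction, and its classifier-free branch is a single linear combination of the conditional and unconditional predictions. A minimal sketch of just that combination, with toy tensors standing in for real model outputs (the shapes here are illustrative, not from the source):

import torch

def cfg_combine(noise_uncond, noise_cond, guidance_scale):
    # Classifier-free guidance as in model_fn above: extrapolate from the
    # unconditional prediction toward the conditional one.
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)

# guidance_scale == 1.0 recovers the purely conditional prediction.
noise_uncond = torch.zeros(1, 4, 8, 8)
noise_cond = torch.ones(1, 4, 8, 8)
assert torch.allclose(cfg_combine(noise_uncond, noise_cond, 1.0), noise_cond)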
import torch import comfy.utils import latent_preview from .sampling import gaussian_diffusion as gd from .sampling.dpm_solver import model_wrapper, DPM_Solver, NoiseScheduleVP from comfy.sample import prepare_sampling, prepare_noise, cleanup_additional_models, get_models_from_cond
20,027
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule.
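The cond/uncond alignment in `sample_pixart` is the non-obvious step: the negative embedding is written into a null-embedding buffer cut to the positive prompt's length, and when the negative prompt is longer, its final token is copied back so the EOS embedding survives truncation. A self-contained sketch of the same shape logic, assuming toy dimensions (batch 1, cond length 6, uncond length 8, width 4 are hypothetical, and zeros stand in for y_embedder.y_embedding):

import torch

cond = torch.randn(1, 6, 4)
raw_uncond = torch.randn(1, 8, 4)   # longer than cond, so it gets truncated
null_y = torch.zeros(1, 6, 4)       # stand-in for the learned null embedding

uncond = null_y[:, :cond.shape[1], :].clone()
uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :]
if raw_uncond.shape[1] > cond.shape[1]:
    uncond[:, -1, :] = raw_uncond[:, -1, :]  # restore the EOS token embedding
assert uncond.shape == cond.shape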
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule.
betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000))
3
2023-10-20 21:19:44+00:00
24k
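Record boundary note (editorial, not a dataset field): the gold `next_line` above builds the discrete beta array that the `NoiseScheduleVP` context snippet then consumes via `NoiseScheduleVP('discrete', betas=betas)`. The snippet's docstring states `alphas_cumprod = cumprod(1 - betas)`, so its two discrete-time input branches must agree. A minimal runnable check, with an assumed linear beta array standing in for `gd.get_named_beta_schedule(noise_schedule, 1000)`:

import torch

# Stand-in for gd.get_named_beta_schedule(noise_schedule, 1000); values assumed.
betas = torch.linspace(1e-4, 2e-2, 1000, dtype=torch.float64)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

# 'betas' branch of NoiseScheduleVP.__init__: cumsum of logs ...
log_alphas_from_betas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
# ... equals the 'alphas_cumprod' branch: log of the cumulative product.
log_alphas_from_cumprod = 0.5 * torch.log(alphas_cumprod)

assert torch.allclose(log_alphas_from_betas, log_alphas_from_cumprod)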
amitfin/oref_alert
custom_components/oref_alert/area_utils.py
[ { "identifier": "CITY_ALL_AREAS", "path": "custom_components/oref_alert/metadata/city_all_areas.py", "snippet": "CITY_ALL_AREAS = {\n \"אשדוד - כל האזורים\": [\n \"אשדוד - א,ב,ד,ה\",\n \"אשדוד - איזור תעשייה צפוני\",\n \"אשדוד - ג,ו,ז\",\n \"אשדוד - ח,ט,י,יג,יד,טז\",\n \"אשדוד -יא,יב,טו,יז,מרינה,סיט\",\n ],\n \"אשקלון - כל האזורים\": [\"אשקלון - דרום\", \"אשקלון - צפון\"],\n \"באר שבע - כל האזורים\": [\n \"באר שבע - דרום\",\n \"באר שבע - מזרח\",\n \"באר שבע - מערב\",\n \"באר שבע - צפון\",\n ],\n \"הרצליה - כל האזורים\": [\"הרצליה - מערב\", \"הרצליה - מרכז וגליל ים\"],\n \"חדרה - כל האזורים\": [\n \"חדרה - מזרח\",\n \"חדרה - מערב\",\n \"חדרה - מרכז\",\n \"חדרה - נווה חיים\",\n ],\n \"חיפה - כל האזורים\": [\n \"חיפה - כרמל ועיר תחתית\",\n \"חיפה - מערב\",\n \"חיפה - נווה שאנן ורמות כרמל\",\n \"חיפה - קריית חיים ושמואל\",\n \"חיפה-מפרץ\",\n ],\n \"ירושלים - כל האזורים\": [\n \"ירושלים - אזור תעשייה עטרות\",\n \"ירושלים - דרום\",\n \"ירושלים - כפר עקב\",\n \"ירושלים - מזרח\",\n \"ירושלים - מערב\",\n \"ירושלים - מרכז\",\n \"ירושלים - צפון\",\n ],\n \"נתניה - כל האזורים\": [\"נתניה - מזרח\", \"נתניה - מערב\"],\n \"ראשון לציון - כל האזורים\": [\"ראשון לציון - מזרח\", \"ראשון לציון - מערב\"],\n \"רמת גן - כל האזורים\": [\"רמת גן - מזרח\", \"רמת גן - מערב\"],\n \"תל אביב - כל האזורים\": [\n \"תל אביב - דרום העיר ויפו\",\n \"תל אביב - מזרח\",\n \"תל אביב - מרכז העיר\",\n \"תל אביב - עבר הירקון\",\n ],\n}" }, { "identifier": "DISTRICT_AREAS", "path": "custom_components/oref_alert/metadata/district_to_areas.py", "snippet": "DISTRICT_AREAS = {\n \"מחוז אילת\": [\"אזור תעשייה שחורת\", \"אילות\", \"אילת\"],\n \"מחוז בקעה\": [\n \"ארגמן\",\n \"בקעות\",\n \"גיתית\",\n \"גלגל\",\n \"חמדת\",\n \"חמרה\",\n \"ייט''ב\",\n \"יפית\",\n \"מבואות יריחו\",\n \"מחולה\",\n \"מכורה\",\n \"מעלה אפרים\",\n \"משואה\",\n \"משכיות\",\n \"נעמה\",\n \"נערן\",\n \"נתיב הגדוד\",\n \"פצאל\",\n \"רועי\",\n \"רותם\",\n \"שדמות מחולה\",\n \"תומר\",\n ],\n \"מחוז בקעת בית שאן\": [\n \"אזור תעשייה צבאים\",\n \"בית אלפא וחפציבה\",\n \"בית השיטה\",\n \"בית יוסף\",\n \"בית שאן\",\n \"גשר\",\n \"חוות עדן\",\n \"חמדיה\",\n \"טייבה בגלבוע\",\n \"טירת צבי\",\n \"ירדנה\",\n \"כפר גמילה מלכישוע\",\n \"כפר רופין\",\n \"מולדת\",\n \"מירב\",\n \"מנחמיה\",\n \"מסילות\",\n \"מעוז חיים\",\n \"מעלה גלבוע\",\n \"נוה איתן\",\n \"נווה אור\",\n \"ניר דוד\",\n \"עין הנצי''ב\",\n \"רוויה\",\n \"רחוב\",\n \"רשפים\",\n \"שדה אליהו\",\n \"שדה נחום\",\n \"שדי תרומות\",\n \"שלוחות\",\n \"שלפים\",\n \"תל תאומים\",\n ],\n \"מחוז גולן דרום\": [\n \"אבני איתן\",\n \"אזור תעשייה בני יהודה\",\n \"אלוני הבשן\",\n \"אלי עד\",\n \"אלמגור\",\n \"אניעם\",\n \"אפיק\",\n \"אשדות יעקב איחוד\",\n \"אשדות יעקב מאוחד\",\n \"בני יהודה וגבעת יואב\",\n \"גשור\",\n \"האון\",\n \"חד נס\",\n \"חמת גדר\",\n \"חספין\",\n \"יונתן\",\n \"כנף\",\n \"כפר חרוב\",\n \"מבוא חמה\",\n \"מיצר\",\n \"מסדה\",\n \"מעגן\",\n \"מעלה גמלא\",\n \"נאות גולן\",\n \"נוב\",\n \"נטור\",\n \"עין גב\",\n \"קדמת צבי\",\n \"קצרין\",\n \"קצרין - אזור תעשייה\",\n \"קשת\",\n \"רמות\",\n \"רמת מגשימים\",\n \"שער הגולן\",\n \"תל קציר\",\n ],\n \"מחוז גולן צפון\": [\n \"אודם\",\n \"אורטל\",\n \"אל רום\",\n \"בוקעתא\",\n \"מג'דל שמס\",\n \"מסעדה\",\n \"מרום גולן\",\n \"נווה אטי''ב\",\n \"נמרוד\",\n \"עין זיוון\",\n \"עין קנייא\",\n \"קלע\",\n \"שעל\",\n ],\n \"מחוז גליל עליון\": [\n \"אבו סנאן\",\n \"אור הגנוז\",\n \"אזור תעשייה בר-לב\",\n \"אזור תעשייה חצור הגלילית\",\n \"אזור תעשייה כרמיאל\",\n \"אזור תעשייה צ.ח.ר\",\n \"אזור תעשייה שער נעמן\",\n \"אחיהוד\",\n \"איילת השחר\",\n \"אליפלט\",\n \"אמירים\",\n 
\"אמנון\",\n \"אפק\",\n \"אשרת\",\n \"בוסתן הגליל\",\n \"ביריה\",\n \"בית ג'אן\",\n \"בית העמק\",\n \"בענה\",\n \"בר יוחאי\",\n \"ג'דידה מכר\",\n \"ג'וליס\",\n \"גדות\",\n \"גיתה\",\n \"דיר אל-אסד\",\n \"הר-חלוץ\",\n \"חולתה\",\n \"חצור הגלילית\",\n \"חרשים\",\n \"טובא זנגריה\",\n \"טל-אל\",\n \"ינוח-ג'ת\",\n \"יסוד המעלה\",\n \"יסעור\",\n \"ירכא\",\n \"כורזים ורד הגליל\",\n \"כחל\",\n \"כיסרא סמיע\",\n \"כישור\",\n \"כליל\",\n \"כמון\",\n \"כפר הנשיא\",\n \"כפר יסיף\",\n \"כפר מסריק\",\n \"כפר שמאי\",\n \"כרכום\",\n \"כרמיאל\",\n \"לבון\",\n \"לוחמי הגטאות\",\n \"לפידות\",\n \"מג'דל כרום\",\n \"מגדל תפן\",\n \"מזרעה\",\n \"מחניים\",\n \"מירון\",\n \"מכמנים\",\n \"מנחת מחניים\",\n \"מרכז אזורי מרום גליל\",\n \"משמר הירדן\",\n \"נחף\",\n \"נס עמים\",\n \"נתיב השיירה\",\n \"סאג'ור\",\n \"ספסופה - כפר חושן\",\n \"עין אל אסד\",\n \"עין המפרץ\",\n \"עין כמונים\",\n \"עכו\",\n \"עכו - אזור תעשייה\",\n \"עמוקה\",\n \"עמיעד\",\n \"עמקה\",\n \"פלך\",\n \"פרוד\",\n \"צורית גילון\",\n \"צפת\",\n \"קדיתא\",\n \"קדרים\",\n \"ראמה\",\n \"ראש פינה\",\n \"רגבה\",\n \"שבי ציון\",\n \"שדה אליעזר\",\n \"שומרת\",\n \"שזור\",\n \"שייח' דנון\",\n \"שפר\",\n \"תובל\",\n ],\n \"מחוז גליל תחתון\": [\n \"אזור תעשייה צמח\",\n \"אזור תעשייה קדמת גליל\",\n \"אלומות\",\n \"אפיקים\",\n \"ארבל\",\n \"אתר ההנצחה גולני\",\n \"בית זרע\",\n \"בית ירח\",\n \"גבעת אבני\",\n \"גינוסר\",\n \"דגניה א\",\n \"דגניה ב\",\n \"הודיות\",\n \"הזורעים\",\n \"המכללה האקדמית כנרת\",\n \"ואדי אל חמאם\",\n \"חוקוק\",\n \"טבריה\",\n \"יבנאל\",\n \"כינרת מושבה\",\n \"כינרת קבוצה\",\n \"כפר זיתים\",\n \"כפר חיטים\",\n \"כפר כמא\",\n \"כפר נהר הירדן\",\n \"לביא\",\n \"לבנים\",\n \"מגדל\",\n \"מצפה\",\n \"פוריה כפר עבודה\",\n \"פוריה נווה עובד\",\n \"פוריה עילית\",\n \"רביד\",\n \"שדה אילן\",\n \"שרונה\",\n ],\n \"מחוז דן\": [\n \"אור יהודה\",\n \"אזור\",\n \"בני ברק\",\n \"בת-ים\",\n \"גבעת השלושה\",\n \"גבעת שמואל\",\n \"גבעתיים\",\n \"גני תקווה\",\n \"גת רימון\",\n \"הרצליה - מערב\",\n \"הרצליה - מרכז וגליל ים\",\n \"חולון\",\n \"יהוד-מונוסון\",\n \"כפר סירקין\",\n \"כפר שמריהו\",\n \"מגשימים\",\n \"מעש\",\n \"מקווה ישראל\",\n \"מתחם פי גלילות\",\n \"סביון\",\n \"סינמה סיטי גלילות\",\n \"פתח תקווה\",\n \"קריית אונו\",\n \"רמת גן - מזרח\",\n \"רמת גן - מערב\",\n \"רמת השרון\",\n \"תל אביב - דרום העיר ויפו\",\n \"תל אביב - מזרח\",\n \"תל אביב - מרכז העיר\",\n \"תל אביב - עבר הירקון\",\n ],\n \"מחוז דרום הנגב\": [\n \"אבו קרינאת\",\n \"אבו תלול\",\n \"אורון תעשייה ומסחר\",\n \"אזור תעשייה דימונה\",\n \"אזור תעשייה רותם\",\n \"אל פורעה\",\n \"אשלים\",\n \"באר מילכה\",\n \"ביר הדאג'\",\n \"בית סוהר נפחא\",\n \"דימונה\",\n \"הר הנגב\",\n \"ואדי אל נעם דרום\",\n \"חירן\",\n \"טללים\",\n \"ירוחם\",\n \"כמהין\",\n \"כסייפה\",\n \"מדרשת בן גוריון\",\n \"ממשית\",\n \"מצפה רמון\",\n \"מרחב עם\",\n \"מרעית\",\n \"משאבי שדה\",\n \"ניצנה\",\n \"סעייה-מולדה\",\n \"עבדת\",\n \"עזוז\",\n \"ערד\",\n \"ערערה בנגב\",\n \"קדש ברנע\",\n \"קצר-א-סיר\",\n \"רביבים\",\n \"רתמים\",\n \"שאנטי במדבר\",\n \"שדה בוקר\",\n \"תל ערד\",\n ],\n \"מחוז הכרמל\": [\n \"אזור תעשייה ניר עציון\",\n \"בית אורן\",\n \"בית סוהר קישון\",\n \"בית צבי\",\n \"בת שלמה\",\n \"גבע כרמל\",\n \"גבעת וולפסון\",\n \"דור\",\n \"דלית אל כרמל\",\n \"הבונים\",\n \"יערות הכרמל\",\n \"כלא דמון\",\n \"כפר הנוער ימין אורד\",\n \"כרם מהר''ל\",\n \"מאיר שפיה\",\n \"מגדים\",\n \"מרכז מיר''ב\",\n \"נווה ים\",\n \"נחשולים\",\n \"ניר עציון\",\n \"עופר\",\n \"עין איילה\",\n \"עין הוד\",\n \"עין חוד\",\n \"עין כרמל\",\n \"עספיא\",\n \"עתלית\",\n \"פוריידיס\",\n \"צרופה\",\n ],\n \"מחוז המפרץ\": [\n \"אושה\",\n 
\"איבטין\",\n \"בית עלמין תל רגב\",\n \"יגור\",\n \"כפר ביאליק\",\n \"כפר המכבי\",\n \"כפר חסידים\",\n \"קריית אתא\",\n \"קריית ביאליק\",\n \"קריית ים\",\n \"קריית מוצקין\",\n \"רכסים\",\n \"רמת יוחנן\",\n ],\n \"מחוז העמקים\": [\n \"אום אל-גנם\",\n \"אורנים\",\n \"אזור תעשייה אלון התבור\",\n \"אזור תעשייה מבואות הגלבוע\",\n \"אזור תעשייה ציפורית\",\n \"אחוזת ברק\",\n \"אילניה\",\n \"אכסאל\",\n \"אל-ח'וואלד מערב\",\n \"אלון הגליל\",\n \"אלוני אבא\",\n \"אלונים\",\n \"בית לחם הגלילית\",\n \"בית סוהר שיטה וגלבוע\",\n \"בית קשת\",\n \"בית שערים\",\n \"בלפוריה\",\n \"בסמת טבעון\",\n \"גבעת אלה\",\n \"גבת\",\n \"גדיש\",\n \"גדעונה\",\n \"גזית\",\n \"גן נר\",\n \"גניגר\",\n \"דבוריה\",\n \"דברת\",\n \"דחי\",\n \"הושעיה\",\n \"היוגב\",\n \"הסוללים\",\n \"הרדוף\",\n \"זרזיר\",\n \"ח'וואלד\",\n \"חג'אג'רה\",\n \"טמרה בגלבוע\",\n \"יזרעאל\",\n \"יפיע\",\n \"יפעת\",\n \"ישובי אומן\",\n \"ישובי יעל\",\n \"כדורי\",\n \"כעביה\",\n \"כעביה טבאש\",\n \"כפר ברוך\",\n \"כפר גדעון\",\n \"כפר החורש\",\n \"כפר טבאש\",\n \"כפר יהושע\",\n \"כפר יחזקאל\",\n \"כפר כנא\",\n \"כפר מצר\",\n \"כפר קיש\",\n \"כפר תבור\",\n \"כפר תקווה\",\n \"מגדל העמק\",\n \"מגן שאול\",\n \"מוקיבלה\",\n \"מזרע\",\n \"מלאה\",\n \"מנשית זבדה\",\n \"מרחביה מושב\",\n \"מרחביה קיבוץ\",\n \"מרכז אומן\",\n \"מרכז חבר\",\n \"משהד\",\n \"נהלל\",\n \"נוף הגליל\",\n \"נופית\",\n \"נורית\",\n \"נין\",\n \"ניר יפה\",\n \"נעורה\",\n \"נצרת\",\n \"סואעד חמירה\",\n \"סולם\",\n \"סנדלה\",\n \"עדי\",\n \"עילוט\",\n \"עין דור\",\n \"עין חרוד\",\n \"עין חרוד איחוד\",\n \"עין מאהל\",\n \"עפולה\",\n \"ציפורי\",\n \"קבוצת גבע\",\n \"קריית טבעון-בית זייד\",\n \"ראס עלי\",\n \"ריינה\",\n \"רם און\",\n \"רמת דוד\",\n \"רמת ישי\",\n \"רמת צבי\",\n \"שבלי\",\n \"שדה יעקב\",\n \"שדמות דבורה\",\n \"שמשית\",\n \"שער העמקים\",\n \"שריד\",\n \"תחנת רכבת כפר יהושוע\",\n \"תל יוסף\",\n \"תל עדשים\",\n \"תמרת\",\n ],\n \"מחוז השפלה\": [\n \"אזור תעשייה נשר - רמלה\",\n \"אחיסמך\",\n \"אחיעזר\",\n \"אירוס\",\n \"באר יעקב\",\n \"בית דגן\",\n \"בית חנן\",\n \"בית חשמונאי\",\n \"בית עובד\",\n \"בית עוזיאל\",\n \"בן שמן\",\n \"גאליה\",\n \"גזר\",\n \"גיבתון\",\n \"גינתון\",\n \"גן שורק\",\n \"גן שלמה\",\n \"גנות\",\n \"גני הדר\",\n \"גני יוחנן\",\n \"זיתן\",\n \"חולדה\",\n \"חמד\",\n \"יגל\",\n \"יד רמב''ם\",\n \"יסודות\",\n \"יציץ\",\n \"ישרש\",\n \"כפר ביל''ו\",\n \"כפר בן נון\",\n \"כפר חב''ד\",\n \"כפר נוער בן שמן\",\n \"כפר שמואל\",\n \"כרם בן שמן\",\n \"כרמי יוסף\",\n \"לוד\",\n \"מזכרת בתיה\",\n \"מצליח\",\n \"משמר איילון\",\n \"משמר דוד\",\n \"משמר השבעה\",\n \"נטעים\",\n \"ניר צבי\",\n \"נס ציונה\",\n \"נען\",\n \"נצר חזני\",\n \"נצר סרני\",\n \"סתריה\",\n \"עזריה\",\n \"עיינות\",\n \"פארק תעשיות פלמחים\",\n \"פדיה\",\n \"פתחיה\",\n \"צפריה\",\n \"קריית עקרון\",\n \"ראשון לציון - מזרח\",\n \"ראשון לציון - מערב\",\n \"רחובות\",\n \"רמות מאיר\",\n \"רמלה\",\n \"תעשיון צריפין\",\n ],\n \"מחוז ואדי ערה\": [\n \"אום אל פחם\",\n \"אום אל קוטוף\",\n \"אזור תעשייה יקנעם עילית\",\n \"אזור תעשייה מבוא כרמל\",\n \"אל עריאן\",\n \"אליקים\",\n \"באקה אל גרבייה\",\n \"בית סוהר מגידו\",\n 'בסמ\"ה',\n \"ברטעה\",\n \"ג'ת\",\n \"גבעת ניל''י\",\n \"גבעת עוז\",\n \"גלעד\",\n \"דליה\",\n \"חריש\",\n \"יקנעם המושבה והזורע\",\n \"יקנעם עילית\",\n \"כפר קרע\",\n \"מגל\",\n \"מדרך עוז\",\n \"מועאוויה\",\n \"מי עמי\",\n \"מייסר\",\n \"מעלה עירון\",\n \"מצפה אילן\",\n \"מצר\",\n \"משמר העמק\",\n \"עין אל-סהלה\",\n \"עין העמק\",\n \"עין השופט\",\n \"ערערה\",\n \"קיבוץ מגידו\",\n \"קציר\",\n \"רגבים\",\n \"רמות מנשה\",\n \"רמת השופט\",\n ],\n \"מחוז יהודה\": [\n \"אדורה\",\n \"אדוריים\",\n \"אזור 
תעשייה מישור אדומים\",\n \"אזור תעשייה מיתרים\",\n \"אלון\",\n \"אלון שבות\",\n \"אליאב\",\n \"אלעזר\",\n \"אמציה\",\n \"אפרת\",\n \"בית חגי\",\n \"בית יתיר\",\n \"ביתר עילית\",\n \"בני דקלים\",\n \"בת עין\",\n \"גבעות\",\n \"הר גילה\",\n \"הר עמשא\",\n \"חברון\",\n \"חוות שדה בר\",\n \"טנא עומרים\",\n \"כפר אדומים\",\n \"כפר אלדד\",\n \"כפר עציון\",\n \"כרמי צור\",\n \"כרמי קטיף\",\n \"כרמל\",\n \"מגדל עוז\",\n \"מיצד\",\n \"מעון\",\n \"מעלה אדומים\",\n \"מעלה חבר\",\n \"מעלה עמוס\",\n \"מעלה רחבעם\",\n \"מצפה יריחו\",\n \"נגוהות\",\n \"נווה דניאל\",\n \"נופי פרת\",\n \"נוקדים\",\n \"נטע\",\n \"סוסיא\",\n \"עלמון\",\n \"עשאהל\",\n \"עתניאל\",\n \"פני קדם\",\n \"קדר\",\n \"קרית ארבע\",\n \"ראש צורים\",\n \"שומריה\",\n \"שמעה\",\n \"שני ליבנה\",\n \"שקף\",\n \"תלם\",\n \"תקוע\",\n ],\n \"מחוז ים המלח\": [\n \"אבנת\",\n \"אלמוג\",\n \"בית הערבה\",\n \"בתי מלון ים המלח\",\n \"ורד יריחו\",\n \"מלונות ים המלח מרכז\",\n \"מצדה\",\n \"מצוקי דרגות\",\n \"מצפה שלם\",\n \"מרחצאות עין גדי\",\n \"מרכז אזורי מגילות\",\n \"נאות הכיכר\",\n \"נווה זוהר\",\n \"עין בוקק\",\n \"עין גדי\",\n \"עין תמר\",\n \"קליה\",\n ],\n \"מחוז ירושלים\": [\n \"אבן ספיר\",\n \"אורה\",\n \"בית זית\",\n \"גבעון החדשה\",\n \"גבעת זאב\",\n \"ירושלים - אזור תעשייה עטרות\",\n \"ירושלים - דרום\",\n \"ירושלים - כפר עקב\",\n \"ירושלים - מזרח\",\n \"ירושלים - מערב\",\n \"ירושלים - מרכז\",\n \"ירושלים - צפון\",\n \"מבשרת ציון\",\n \"מוצא עילית\",\n \"נבי סמואל\",\n \"עמינדב\",\n \"פנימיית עין כרם\",\n \"רמת רחל\",\n ],\n \"מחוז ירקון\": [\n \"אזור תעשייה אפק ולב הארץ\",\n \"אזור תעשייה חבל מודיעין\",\n \"אלעד\",\n \"בארות יצחק\",\n \"בית נחמיה\",\n \"בית עריף\",\n \"בני עטרות\",\n \"ברקת\",\n \"גבעת כ''ח\",\n \"גמזו\",\n \"גני מודיעין\",\n \"חדיד\",\n \"חשמונאים\",\n \"טירת יהודה\",\n \"כפר דניאל\",\n \"כפר האורנים\",\n \"כפר טרומן\",\n \"כפר רות\",\n \"לפיד\",\n \"מבוא חורון\",\n \"מבוא מודיעים\",\n \"מודיעין\",\n \"מודיעין - ישפרו סנטר\",\n \"מודיעין - ליגד סנטר\",\n \"מודיעין עילית\",\n \"מזור\",\n \"מתתיהו\",\n \"נוף איילון\",\n \"נופך\",\n \"נחלים\",\n \"נחשונים\",\n \"עינת\",\n \"ראש העין\",\n \"רינתיה\",\n \"שהם\",\n \"שילת\",\n \"שעלבים\",\n \"תעשיון חצב\",\n ],\n \"מחוז לכיש\": [\n \"אביגדור\",\n \"אבן שמואל\",\n \"אורות\",\n \"אזור תעשייה באר טוביה\",\n \"אזור תעשייה כנות\",\n \"אזור תעשייה עד הלום\",\n \"אזור תעשייה קריית גת\",\n \"אזור תעשייה תימורים\",\n \"אחווה\",\n \"אחוזם\",\n \"איתן\",\n \"אל עזי\",\n \"אלומה\",\n \"אמונים\",\n \"אשדוד - א,ב,ד,ה\",\n \"אשדוד - איזור תעשייה צפוני\",\n \"אשדוד - ג,ו,ז\",\n \"אשדוד - ח,ט,י,יג,יד,טז\",\n \"אשדוד -יא,יב,טו,יז,מרינה,סיט\",\n \"באר טוביה\",\n \"ביצרון\",\n \"בית אלעזרי\",\n \"בית גמליאל\",\n \"בית חלקיה\",\n \"בית עזרא\",\n \"בן זכאי\",\n \"בני דרום\",\n \"בני עי''ש\",\n \"בני ראם\",\n \"בניה\",\n \"גבעת ברנר\",\n \"גבעת וושינגטון\",\n \"גבעתי\",\n \"גדרה\",\n \"גן הדרום\",\n \"גן יבנה\",\n \"גני טל\",\n \"גת\",\n \"ורדון\",\n \"זבדיאל\",\n \"זוהר\",\n \"זרחיה\",\n \"חפץ חיים\",\n \"חצב\",\n \"חצור\",\n \"יבנה\",\n \"יד בנימין\",\n \"יד נתן\",\n \"ינון\",\n \"כנות\",\n \"כפר אביב\",\n \"כפר אחים\",\n \"כפר הנגיד\",\n \"כפר הרי''ף וצומת ראם\",\n \"כפר ורבורג\",\n \"כפר מרדכי\",\n \"כרם ביבנה\",\n \"לכיש\",\n \"מישר\",\n \"מנוחה\",\n \"מעון צופיה\",\n \"מרכז שפירא\",\n \"משגב דב\",\n \"משואות יצחק\",\n \"מתחם בני דרום\",\n \"נגבה\",\n \"נהורה\",\n \"נוגה\",\n \"נווה מבטח\",\n \"נועם\",\n \"נחלה\",\n \"ניר בנים\",\n \"ניר גלים\",\n \"ניר ח''ן\",\n \"סגולה\",\n \"עוזה\",\n \"עוצם\",\n \"עזר\",\n \"עזריקם\",\n \"עין צורים\",\n \"ערוגות\",\n \"עשרת\",\n \"פארק תעשייה ראם\",\n 
\"פלמחים\",\n \"קבוצת יבנה\",\n \"קדמה\",\n \"קדרון\",\n \"קוממיות\",\n \"קריית גת, כרמי גת\",\n \"קריית מלאכי\",\n \"רבדים\",\n \"רווחה\",\n \"שדה דוד\",\n \"שדה יואב\",\n \"שדה משה\",\n \"שדה עוזיהו\",\n \"שדמה\",\n \"שחר\",\n \"שלווה\",\n \"שפיר\",\n \"שתולים\",\n \"תימורים\",\n \"תלמי יחיאל\",\n \"תלמים\",\n ],\n \"מחוז מנשה\": [\n \"אביאל\",\n \"אור עקיבא\",\n \"אזור תעשייה קיסריה\",\n \"אזור תעשייה רגבים\",\n \"אלוני יצחק\",\n \"בית חנניה\",\n \"בית ספר אורט בנימינה\",\n \"בנימינה\",\n \"ברקאי\",\n \"ג'סר א-זרקא\",\n \"גבעת חביבה\",\n \"גבעת עדה\",\n \"גן השומרון\",\n \"גן שמואל\",\n \"החותרים\",\n \"זכרון יעקב\",\n \"חדרה - מזרח\",\n \"חדרה - מערב\",\n \"חדרה - מרכז\",\n \"חדרה - נווה חיים\",\n \"חיפה - כרמל ועיר תחתית\",\n \"חיפה - מערב\",\n \"חיפה - נווה שאנן ורמות כרמל\",\n \"חיפה - קריית חיים ושמואל\",\n \"חיפה-מפרץ\",\n \"טירת כרמל\",\n \"כפר גלים\",\n \"כפר גליקסון\",\n \"כפר פינס\",\n \"להבות חביבה\",\n \"מאור\",\n \"מעגן מיכאל\",\n \"מעיין צבי\",\n \"מענית\",\n \"מרכז ימי קיסריה\",\n \"משמרות\",\n \"נשר\",\n \"עין עירון\",\n \"עין שמר\",\n \"עמיקם\",\n \"פרדס חנה-כרכור\",\n \"קיסריה\",\n \"רמת הנדיב\",\n \"שדה יצחק\",\n \"שדות ים\",\n \"שער מנשה\",\n \"תלמי אלעזר\",\n ],\n \"מחוז מערב הנגב\": [\n \"אופקים\",\n \"אורים\",\n \"אזור תעשייה נ.ע.מ\",\n \"אשבול\",\n \"אשל הנשיא\",\n \"בטחה\",\n \"בית הגדי\",\n \"ברור חיל\",\n \"ברוש\",\n \"גבולות\",\n \"גילת\",\n \"דורות\",\n \"דניאל\",\n \"זרועה\",\n \"חוות שיקמים\",\n \"יושיביה\",\n \"מבועים\",\n \"מסלול\",\n \"מעגלים, גבעולים, מלילות\",\n \"ניר משה\",\n \"ניר עקיבא\",\n \"נתיבות\",\n \"פדויים\",\n \"פטיש\",\n \"פעמי תש''ז\",\n \"צאלים\",\n \"קלחים\",\n \"קריית חינוך מרחבים\",\n \"רוחמה\",\n \"רנן\",\n \"שבי דרום\",\n \"שדה צבי\",\n \"שיבולים\",\n \"שרשרת\",\n \"תאשור\",\n \"תדהר\",\n \"תלמי ביל''ו\",\n \"תפרח\",\n ],\n \"מחוז מערב לכיש\": [\n \"אזור תעשייה הדרומי אשקלון\",\n \"אזור תעשייה צפוני אשקלון\",\n \"אשקלון - דרום\",\n \"אשקלון - צפון\",\n \"באר גנים\",\n \"בית שקמה\",\n \"ברכיה\",\n \"בת הדר\",\n \"גיאה\",\n \"הודיה\",\n \"חלץ\",\n \"כוכב מיכאל\",\n \"כפר סילבר\",\n \"מבקיעים\",\n \"משען\",\n \"ניצן\",\n \"ניצנים\",\n \"ניר ישראל\",\n \"תלמי יפה\",\n ],\n \"מחוז מרכז הגליל\": [\n \"אבטליון\",\n \"אזור תעשייה תרדיון\",\n \"אעבלין\",\n \"אשבל\",\n \"אשחר\",\n \"בועיינה-נוג'ידאת\",\n \"ביר אלמכסור\",\n \"בית סוהר צלמון\",\n \"בית רימון\",\n \"דיר חנא\",\n \"דמיידה\",\n \"הררית יחד\",\n \"חוסנייה\",\n \"חזון\",\n \"חנתון\",\n \"טורעאן\",\n \"טמרה\",\n \"טפחות\",\n \"יובלים\",\n \"יודפת\",\n \"יעד\",\n \"כאבול\",\n \"כאוכב אבו אלהיג'א\",\n \"כלנית\",\n \"כפר חנניה\",\n \"כפר מנדא\",\n \"לוטם וחמדון\",\n \"מורן\",\n \"מורשת\",\n \"מנוף\",\n \"מסד\",\n \"מע'אר\",\n \"מעלה צביה\",\n \"מצפה אבי''ב\",\n \"מצפה נטופה\",\n \"מרכז אזורי משגב\",\n \"סכנין\",\n \"סלמה\",\n \"עוזייר\",\n \"עילבון\",\n \"עינבר\",\n \"עצמון - שגב\",\n \"עראבה\",\n \"ערב אל-נעים\",\n \"קורנית\",\n \"ראס אל-עין\",\n \"רומאנה\",\n \"רומת אל הייב\",\n \"רקפת\",\n \"שורשים\",\n \"שכניה\",\n \"שעב\",\n \"שפרעם\",\n ],\n \"מחוז מרכז הנגב\": [\n \"אום בטין\",\n \"אזור תעשייה עידן הנגב\",\n \"אל סייד\",\n \"אשכולות\",\n \"אתר דודאים\",\n \"באר שבע - דרום\",\n \"באר שבע - מזרח\",\n \"באר שבע - מערב\",\n \"באר שבע - צפון\",\n \"בית קמה\",\n \"גבעות בר\",\n \"גבעות גורל\",\n \"דביר\",\n \"חורה\",\n \"חצרים\",\n \"כרמים\",\n \"כרמית\",\n \"להב\",\n \"להבים\",\n \"לקיה\",\n \"מיתר\",\n \"משמר הנגב\",\n \"מתחם צומת שוקת\",\n \"נאות חובב\",\n \"נבטים\",\n \"סנסנה\",\n \"עומר\",\n \"רהט\",\n \"שגב שלום\",\n \"שובל\",\n \"תארבין\",\n \"תל שבע\",\n ],\n \"מחוז עוטף עזה\": 
[\n \"אבשלום\",\n \"אור הנר\",\n \"ארז\",\n \"בארי\",\n \"בני נצרים\",\n \"גבים, מכללת ספיר\",\n \"גברעם\",\n \"דקל\",\n \"זיקים\",\n \"זמרת, שובה\",\n \"חולית\",\n \"יבול\",\n \"יד מרדכי\",\n \"יכיני\",\n \"יתד\",\n \"כיסופים\",\n \"כפר מימון ותושיה\",\n \"כפר עזה\",\n \"כרם שלום\",\n \"כרמיה\",\n \"מבטחים, עמיעוז, ישע\",\n \"מגן\",\n \"מטווח ניר עם\",\n \"מפלסים\",\n \"נווה\",\n \"נחל עוז\",\n \"ניר יצחק\",\n \"ניר עוז\",\n \"נירים\",\n \"נתיב העשרה\",\n \"סופה\",\n \"סעד\",\n \"עין הבשור\",\n \"עין השלושה\",\n \"עלומים\",\n \"פרי גן\",\n \"צוחר, אוהד\",\n \"רעים\",\n \"שדה אברהם\",\n \"שדה ניצן\",\n \"שדרות, איבים, ניר עם\",\n \"שוקדה\",\n \"שלומית\",\n \"תלמי אליהו\",\n \"תלמי יוסף\",\n \"תקומה\",\n \"תקומה וחוות יזרעם\",\n ],\n \"מחוז ערבה\": [\n \"אל עמארני, אל מסק\",\n \"אליפז ומכרות תמנע\",\n \"באר אורה\",\n \"גרופית\",\n \"חוות ערנדל\",\n \"חי-בר יטבתה\",\n \"חצבה\",\n \"יהל\",\n \"יטבתה\",\n \"כושי רמון\",\n \"לוטן\",\n \"נאות סמדר\",\n \"נווה חריף\",\n \"סמר\",\n \"ספיר\",\n \"עידן\",\n \"עין חצבה\",\n \"עין יהב\",\n \"עיר אובות\",\n \"פארן\",\n \"צופר\",\n \"צוקים\",\n \"קטורה\",\n \"שחרות\",\n \"שיטים\",\n ],\n \"מחוז קו העימות\": [\n \"אביבים\",\n \"אבירים\",\n \"אבן מנחם\",\n \"אדמית\",\n \"אזור תעשייה אכזיב מילואות\",\n \"אזור תעשייה רמת דלתון\",\n \"אילון\",\n \"אלקוש\",\n \"בית הלל\",\n \"בית ספר שדה מירון\",\n \"בן עמי\",\n \"בצת\",\n \"ברעם\",\n \"ג'ש - גוש חלב\",\n \"גונן\",\n \"גורן\",\n \"גורנות הגליל\",\n \"געתון\",\n \"גשר הזיו\",\n \"דוב''ב\",\n \"דישון\",\n \"דלתון\",\n \"דפנה\",\n \"הגושרים\",\n \"הילה\",\n \"זרעית\",\n \"חוסן\",\n \"חורפיש\",\n \"חניתה\",\n \"יחיעם\",\n \"יערה\",\n \"יפתח\",\n \"יראון\",\n \"כברי\",\n \"כפר בלום\",\n \"כפר גלעדי\",\n \"כפר ורדים\",\n \"כפר יובל\",\n \"כפר סאלד\",\n \"כרם בן זמרה\",\n \"להבות הבשן\",\n \"לימן\",\n \"מטולה\",\n \"מלכיה\",\n \"מנות\",\n \"מנרה\",\n \"מעונה\",\n \"מעיין ברוך\",\n \"מעיליא\",\n \"מעלות תרשיחא\",\n \"מצובה\",\n \"מרגליות\",\n \"מרכז אזורי מבואות חרמון\",\n \"משגב עם\",\n \"מתת\",\n \"נאות מרדכי\",\n \"נהריה\",\n \"נווה זיו\",\n \"נטועה\",\n \"סאסא\",\n \"סער\",\n \"ע'ג'ר\",\n \"עבדון\",\n \"עברון\",\n \"עין יעקב\",\n \"עלמה\",\n \"עמיר\",\n \"ערב אל עראמשה\",\n \"פסוטה\",\n \"פקיעין\",\n \"פקיעין החדשה\",\n \"צבעון\",\n \"צוריאל\",\n \"קיבוץ דן\",\n \"קריית שמונה\",\n \"ראש הנקרה\",\n \"ריחאנייה\",\n \"רמות נפתלי\",\n \"שאר ישוב\",\n \"שדה נחמיה\",\n \"שומרה\",\n \"שלומי\",\n \"שמיר\",\n \"שניר\",\n \"שתולה\",\n \"תל חי\",\n ],\n \"מחוז שומרון\": [\n \"אבני חפץ\",\n \"אזור תעשייה בראון\",\n \"אזור תעשייה שער בנימין\",\n \"אחיה\",\n \"איתמר\",\n \"אלון מורה\",\n \"אריאל\",\n \"בית אל\",\n \"בית אריה\",\n \"בית חורון\",\n \"ברוכין\",\n \"ברקן\",\n \"גבע בנימין\",\n \"גבעת אסף\",\n \"גבעת הראל וגבעת הרואה\",\n \"דולב\",\n \"הר ברכה\",\n \"חוות גלעד\",\n \"חוות יאיר\",\n \"חיננית\",\n \"חלמיש\",\n \"חרמש\",\n \"חרשה\",\n \"טל מנשה\",\n \"טלמון\",\n \"יצהר\",\n \"יקיר\",\n \"כוכב השחר\",\n \"כוכב יעקב\",\n \"כפר תפוח\",\n \"מבוא דותן\",\n \"מגדלים\",\n \"מגרון\",\n \"מעלה לבונה\",\n \"מעלה מכמש\",\n \"מעלה שומרון\",\n \"נופי נחמיה\",\n \"נופים\",\n \"נחליאל\",\n \"ניל''י\",\n \"נעלה\",\n \"נריה\",\n \"עדי עד\",\n \"עופרים\",\n \"עטרת\",\n \"עלי\",\n \"עלי זהב\",\n \"עמיחי\",\n \"עמנואל\",\n \"ענב\",\n \"עפרה\",\n \"פדואל\",\n \"פסגות\",\n \"קדומים\",\n \"קידה\",\n \"קריית נטפים\",\n \"קרני שומרון\",\n \"רבבה\",\n \"רחלים\",\n \"ריחן\",\n \"רימונים\",\n \"שבות רחל\",\n \"שבי שומרון\",\n \"שילה\",\n \"שקד\",\n \"תל ציון\",\n ],\n \"מחוז שפלת יהודה\": [\n \"אבו-גוש\",\n \"אביעזר\",\n \"אדרת\",\n \"אזור 
תעשייה ברוש\",\n \"אזור תעשייה הר טוב - צרעה\",\n \"אשתאול\",\n \"בית גוברין\",\n \"בית מאיר\",\n \"בית ניר\",\n \"בית נקופה\",\n \"בית שמש\",\n \"בקוע\",\n \"בר גיורא\",\n \"גבעות עדן\",\n \"גבעת יערים\",\n \"גבעת ישעיהו\",\n \"גיזו\",\n \"גלאון\",\n \"גפן\",\n \"הר אדר\",\n \"הראל\",\n \"זכריה\",\n \"זנוח\",\n \"טל שחר\",\n \"יד השמונה\",\n \"ישעי\",\n \"כסלון\",\n \"כפר אוריה\",\n \"כפר זוהרים\",\n \"כפר מנחם\",\n \"לוזית\",\n \"לטרון\",\n \"מבוא ביתר\",\n \"מחסיה\",\n \"מטע\",\n \"מסילת ציון\",\n \"מעלה החמישה\",\n \"נווה אילן\",\n \"נווה מיכאל - רוגלית\",\n \"נווה שלום\",\n \"נחושה\",\n \"נחם\",\n \"נחשון\",\n \"נטף\",\n \"נס הרים\",\n \"נתיב הל''ה\",\n \"עגור\",\n \"עין נקובא\",\n \"עין ראפה\",\n \"צובה\",\n \"צור הדסה\",\n \"צלפון\",\n \"צפרירים\",\n \"צרעה\",\n \"קריית יערים\",\n \"קריית ענבים\",\n \"רטורנו - גבעת שמש\",\n \"רמת רזיאל\",\n \"שדות מיכה\",\n \"שואבה\",\n \"שורש\",\n \"שריגים - ליאון\",\n \"תירוש\",\n \"תעוז\",\n \"תרום\",\n ],\n \"מחוז שרון\": [\n \"אביחיל\",\n \"אבן יהודה\",\n \"אודים\",\n \"אורנית\",\n \"אזור תעשייה טירה\",\n \"אזור תעשייה עמק חפר\",\n \"אחיטוב\",\n \"אייל\",\n \"אליכין\",\n \"אלישיב\",\n \"אלישמע\",\n \"אלפי מנשה\",\n \"אלקנה\",\n \"אמץ\",\n \"ארסוף\",\n \"בארותיים\",\n \"בורגתה\",\n \"בחן\",\n \"בית ברל\",\n \"בית הלוי\",\n \"בית חזון\",\n \"בית חרות\",\n \"בית יהושע\",\n \"בית ינאי\",\n \"בית יצחק - שער חפר\",\n \"בית סוהר השרון\",\n \"ביתן אהרן\",\n \"בני דרור\",\n \"בני ציון\",\n \"בצרה\",\n \"בת חן\",\n \"בת חפר\",\n \"ג'לג'וליה\",\n \"גאולי תימן\",\n \"גאולים\",\n \"גבעת חיים איחוד\",\n \"גבעת חיים מאוחד\",\n \"גבעת חן\",\n \"גבעת שפירא\",\n \"גן חיים\",\n \"גן יאשיה\",\n \"גנות הדר\",\n \"גני עם\",\n \"געש\",\n \"הדר עם\",\n \"הוד השרון\",\n \"המעפיל\",\n \"המרכז האקדמי רופין\",\n \"העוגן\",\n \"זמר\",\n \"חבצלת השרון וצוקי ים\",\n \"חגור\",\n \"חגלה\",\n \"חופית\",\n \"חורשים\",\n \"חיבת ציון\",\n \"חניאל\",\n \"חרב לאת\",\n \"חרוצים\",\n \"חרות\",\n \"טייבה\",\n \"טירה\",\n \"יד חנה\",\n \"ינוב\",\n \"יעף\",\n \"יקום\",\n \"ירחיב\",\n \"ירקונה\",\n \"כוכב יאיר - צור יגאל\",\n \"כפר ברא\",\n \"כפר הס\",\n \"כפר הרא''ה\",\n \"כפר ויתקין\",\n \"כפר חיים\",\n \"כפר ידידיה\",\n \"כפר יונה\",\n \"כפר יעבץ\",\n \"כפר מונש\",\n \"כפר מל''ל\",\n \"כפר נטר\",\n \"כפר סבא\",\n \"כפר עבודה\",\n \"כפר קאסם\",\n \"מכון וינגייט\",\n \"מכמורת\",\n \"מעברות\",\n \"מרכז אזורי דרום השרון\",\n \"משמר השרון\",\n \"משמרת\",\n \"מתן\",\n \"נווה ימין\",\n \"נווה ירק\",\n \"נורדיה\",\n \"ניצני עוז\",\n \"ניר אליהו\",\n \"נירית\",\n \"נעורים\",\n \"נתניה - מזרח\",\n \"נתניה - מערב\",\n \"סלעית\",\n \"עדנים\",\n \"עולש\",\n \"עזריאל\",\n \"עין החורש\",\n \"עין ורד\",\n \"עין שריד\",\n \"עץ אפרים\",\n \"פורת\",\n \"פרדסיה\",\n \"צופים\",\n \"צופית\",\n \"צור יצחק\",\n \"צור משה\",\n \"צור נתן\",\n \"קדימה-צורן\",\n \"קלנסווה\",\n \"רמות השבים\",\n \"רמת הכובש\",\n \"רעננה\",\n \"רשפון\",\n \"שדה ורבורג\",\n \"שדי חמד\",\n \"שושנת העמקים\",\n \"שער אפרים\",\n \"שערי תקווה\",\n \"שפיים\",\n \"תחנת רכבת ראש העין\",\n \"תל יצחק\",\n \"תל מונד\",\n \"תנובות\",\n ],\n}" } ]
from .metadata.city_all_areas import CITY_ALL_AREAS from .metadata.district_to_areas import DISTRICT_AREAS
18,992
"""Utilities for metadata information.""" def expand_areas_and_groups(areas_and_groups: list[str]) -> list[str]: """Expand groups (if exists) to areas.""" areas = [] for area_or_group in areas_and_groups: if area_or_group in CITY_ALL_AREAS: areas.extend(CITY_ALL_AREAS[area_or_group])
"""Utilities for metadata information.""" def expand_areas_and_groups(areas_and_groups: list[str]) -> list[str]: """Expand groups (if exists) to areas.""" areas = [] for area_or_group in areas_and_groups: if area_or_group in CITY_ALL_AREAS: areas.extend(CITY_ALL_AREAS[area_or_group])
elif area_or_group in DISTRICT_AREAS:
1
2023-10-18 11:16:41+00:00
24k
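Record boundary note (editorial, not a dataset field): the gold `next_line` for this record is the `elif` that routes district groups. A minimal sketch of the completed `expand_areas_and_groups`, using the `CITY_ALL_AREAS` and `DISTRICT_AREAS` dicts from the record's imports; the `DISTRICT_AREAS` extension and the final `else` fallback are assumptions, and only the `elif` line itself is the record's ground truth:

def expand_areas_and_groups(areas_and_groups: list[str]) -> list[str]:
    """Expand groups (if exists) to areas."""
    areas = []
    for area_or_group in areas_and_groups:
        if area_or_group in CITY_ALL_AREAS:
            areas.extend(CITY_ALL_AREAS[area_or_group])
        elif area_or_group in DISTRICT_AREAS:  # <- the record's gold next_line
            areas.extend(DISTRICT_AREAS[area_or_group])  # assumed continuation
        else:
            areas.append(area_or_group)  # plain area name; assumed fallback
    return areas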
RobertCsordas/moe
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False,\n output_mode: str = \"normal\"):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n # with torch.no_grad():\n # self.embedding.weight.uniform_(-0.1, 0.1)\n\n torch.nn.init.xavier_uniform_(self.embedding.weight)\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = torch.nn.ModuleList(layers)\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n self.output_mode = output_mode\n\n assert self.output_mode in {\"normal\", \"sum\", \"geometric\", \"sigmoid\"}\n\n if self.output_mode in {\"geometric\", \"sigmoid\"}:\n self.output_gate = torch.nn.Linear(state_size, 1)\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output or self.output_mode in {\"sum\", \"sigmoid\"}:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor:\n if self.output_mode == \"sum\":\n return sum(features)\n elif self.output_mode in {\"geometric\", \"sigmoid\"}:\n # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an\n # incomprehensible error in the gradient scaler\n gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1))\n if self.output_mode == \"geometric\":\n ngates = torch.cumprod(1.0 - gates, -1)\n scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1)\n else:\n 
scores = gates\n\n if self.iter % 100 == 0 and self.training:\n self.log(\"output_gate_mean\", framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0)))\n # return sum(f * scores[..., i: i+1] for i, f in enumerate(features))\n f = scores.unsqueeze(-2) @ torch.stack(features, -2)\n return f.squeeze(-2)\n else:\n assert False, \"Invalid output mode\"\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = 0 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim or self.output_mode != \"normal\":\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if self.output_mode != \"normal\":\n net = self.accumulate_output(features)\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n if self.output_mode != \"normal\":\n f_sample = [self.accumulate_output(f_sample[:i]) for i in range(1, len(f_sample)+1)]\n f_sample_all = torch.stack(f_sample, -2)\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, 
p=1).mean())\n\n del outs\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "RelativeTransformerEncoderLayer", "path": "layers/transformer/relative_transformer.py", "snippet": "class RelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, ln_after_attention: bool = True):\n super().__init__()\n self.ln_after_attention = ln_after_attention\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n if ln_after_attention:\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src = self.norm1(src) if self.ln_after_attention else src\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "PrelnRelativeTransformerEncoderLayer", "path": "layers/transformer/relative_preln_transformer.py", "snippet": "class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):\n is_preln = True\n\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,\n drop_expand: bool = True, head_projection_size: Optional[int] = None):\n super().__init__(\n d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,\n activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n drop_expand=drop_expand, head_projection_size=head_projection_size)\n\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n 
src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "PrelnRelativeKVMemTransformerEncoderLayer", "path": "layers/transformer/relative_preln_kvmem_transformer.py", "snippet": "class PrelnRelativeKVMemTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, n_keys: Union[int, Tuple[int, int]], n_layers: int, dim_feedforward=2048,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, pkm_heads: int = 1, pkm_stochastic: bool = True,\n pkm_custom_init: int = 0, pkm_slice_values: bool = False,\n pkm_knn: int = 32, linproj: bool = False, head_merge_topk: bool = False, load_balance: bool = True,\n kvmem_dropout: str = \"none\", kvmem_randomize_indices: bool = False, kvmem_query_bias: bool = False,\n standard_parallel: bool = False, approx_topk: bool = False, factorize: bool = False,\n full_key: bool = False, key_redundancy_factor: int = 1, two_stage: bool = False,\n factors: Optional[List[int]] = None, head_exclusive: bool = False,\n head_projection_size: Optional[int] = None):\n super().__init__()\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n self.pkm = LowrankApproximate2Layer(\n d_model, n_keys, pkm_heads, stochastic=pkm_stochastic, custom_init=pkm_custom_init,\n weight_scale=math.sqrt(2.0 / n_layers), slice_values=pkm_slice_values, knn=pkm_knn,\n head_merge_topk=head_merge_topk, load_balance=load_balance, dropout=dropout,\n query_proj=linproj, randomize_indices=kvmem_randomize_indices, dropout_mode=kvmem_dropout,\n query_bias=kvmem_query_bias, approx=approx_topk, factorize=factorize, full_key=full_key,\n key_redundancy_factor=key_redundancy_factor, two_stage=two_stage, factors=factors,\n head_exclusive=head_exclusive, activation=activation)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.standard_parallel = standard_parallel\n\n reset_prenorm_params(self, n_layers)\n\n if self.standard_parallel:\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=False)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)\n\n initializer = self.pkm.get_custom_init()\n\n s_real = dim_feedforward + self.pkm.size\n # s_real = dim_feedforward + self.pkm.heads * self.pkm.knn\n initializer(self.linear2.weight, std=math.sqrt(2 / (n_layers * s_real)))\n initializer(self.pkm.values.weight, std=math.sqrt(2 / (n_layers * s_real)))\n initializer(self.linear1.weight, std=math.sqrt(2 / (n_layers * d_model)))\n\n if self.pkm.two_stage:\n initializer(self.pkm.full_keys, std=math.sqrt(2 / (n_layers * d_model)))\n\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n src2 = self.norm2(src)\n src3 = self.pkm(src2)\n\n if self.standard_parallel:\n src3 = src3 + self.linear2(self.dropout(self.activation(self.linear1(src2))))\n\n src = src + self.dropout(src3)\n return src" }, { "identifier": "RelativeMoeTransformerEncoderLayer", "path": 
"layers/transformer/relative_moe_transformer.py", "snippet": "class RelativeMoeTransformerEncoderLayer(LoggingLayer, torch.nn.Module):\n def __init__(self, d_model, nhead, n_experts: int, expert_size: int, n_layers: int, dim_feedforward=2048,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, knn: int = 0,\n standard_parallel: bool = False, custom_init: int = 0,\n dropout_mode: str = \"none\", selection_mode: str = \"add\",\n perplexity_reg: float = 0.0, key_mode: str = \"moe\", half_key: bool = False,\n n_heads: int = 1, norm_keys: bool = False, perplexity_reg_mode: str=\"step\",\n n_random: int = 0, reg_type: str = \"normal\", std_correction: bool = False,\n topk_mode: str = \"full\", head_projection_size: Optional[int] = None,\n activation_after_topk: bool = False, weight_grouping: str = \"none\",\n kmeans_distance: str = \"cosine\", drop_parallel: bool = True, block_expert_sel_in_grad: bool = False,\n mlp_selection: bool = False, classification_target: str = \"sum\",\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n norm_standard_parallel_values: bool = False, identical_init: bool = False,\n topological_sel_reg: float = 0.0, topological_expert_reg: float = 0.0,\n gumbel_select_only: bool = False, topk_value_norm_compensation: bool = False,\n norm_expert_scores: bool = False, sel_input_cluster_init: bool = False,\n init_norm_mode: str = \"full\", sel_bias: bool = False,\n bias: bool = False, rescale_normed: bool = False, sel_norm: str = \"none\",\n rescale_grads: bool = False, gumbel_decay: int = 0, preln: bool = True, ln_affine: bool = True,\n sinkhorn_local: bool = False, sinkhorn_n_iters: int = 3, moe_dropout_factor: float = 1.0,\n drop_expert: float = 0.0, expert_size_init: bool = False, sync_distributed: bool = True,\n modulation_amplitude: float = 0.5, invisible_selection: bool = False,\n slope_multiplier: float = 1.0, moe_init_scale: float = 1.0):\n super().__init__()\n self.preln = preln\n self.i = 0\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n std_scale = math.sqrt(2.0 / n_layers) if preln else 1.0\n std_scale *= math.sqrt(moe_init_scale)\n\n self.pkm = MoE(\n d_model, n_experts, expert_size, knn=knn, dropout=dropout * moe_dropout_factor, dropout_mode=dropout_mode,\n weight_scale=std_scale, custom_init=custom_init, selection_mode=selection_mode,\n perplexity_reg=perplexity_reg, key_mode=key_mode, half_key=half_key, n_heads=n_heads,\n norm_keys=norm_keys, perplexity_reg_mode=perplexity_reg_mode, n_random=n_random,\n reg_type=reg_type, std_correction=std_correction, topk_mode=topk_mode,\n activation_after_topk=activation_after_topk, weight_grouping=weight_grouping,\n kmeans_distance=kmeans_distance, activation=activation, block_expert_sel_in_grad=block_expert_sel_in_grad,\n mlp_selection=mlp_selection, classification_target=classification_target,\n normalize_expert_sel_init=normalize_expert_sel_init, norm_key_init=norm_key_init,\n norm_value_init=norm_value_init, identical_init=identical_init, topological_sel_reg=topological_sel_reg,\n topological_expert_reg=topological_expert_reg, gumbel_select_only=gumbel_select_only,\n topk_value_norm_compensation=topk_value_norm_compensation, norm_expert_scores=norm_expert_scores,\n sel_input_cluster_init=sel_input_cluster_init,\n n_parallel_expert_channels=dim_feedforward if standard_parallel else 
0,\n init_norm_mode=init_norm_mode, sel_bias=sel_bias, bias=bias, rescale_normed=rescale_normed,\n sel_norm=sel_norm, rescale_grads=rescale_grads, gumbel_decay=gumbel_decay,\n sinkhorn_local=sinkhorn_local, sinkhorn_n_iters=sinkhorn_n_iters, expert_dropout=drop_expert,\n expert_size_init=expert_size_init, sync_distributed=sync_distributed,\n modulation_amplitude=modulation_amplitude, invisible_selection=invisible_selection,\n slope_multiplier=slope_multiplier)\n\n self.norm1 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.norm2 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.standard_parallel = standard_parallel\n self.drop_parallel = drop_parallel\n\n if preln:\n reset_prenorm_params(self, n_layers)\n\n if self.standard_parallel:\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=bias)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)\n\n s_real = dim_feedforward + self.pkm.size\n # s_real = dim_feedforward + self.pkm.heads * self.pkm.knn\n\n init = self.pkm.get_initializer()\n\n init(self.linear1.weight, std=std_scale * math.sqrt(1.0 / d_model))\n init(self.linear2.weight, std=std_scale * math.sqrt(1.0 / s_real))\n\n if norm_standard_parallel_values:\n with torch.no_grad():\n self.linear2.weight.div_(self.linear2.weight.norm(dim=0, keepdim=True))\n\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src = src2 = self.norm1(src)\n\n if self.i == 3:\n with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:\n src3 = self.pkm(src2)\n prof.export_chrome_trace(\"trace.json\")\n assert False\n else:\n src3 = self.pkm(src2)\n\n # self.i += 1\n\n if self.standard_parallel:\n x = self.linear1(src2)\n with torch.no_grad():\n self.log(\"standard_parallel_relu_pass_rate\", (x > 0).flatten(end_dim=-2).float().mean().item())\n x = self.activation(x)\n if self.drop_parallel:\n x = self.dropout(x)\n src3 = src3 + self.linear2(x)\n\n src = src + self.dropout(src3)\n if not self.preln:\n src = self.norm2(src)\n\n return src" }, { "identifier": "TopkTransformer", "path": "layers/transformer/topk_transformer.py", "snippet": "class TopkTransformer(PrelnRelativeTransformerEncoderLayer, LoggingLayer):\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, drop_expand: bool = True, k: int = 32,\n use_norm: bool = True, head_projection_size: Optional[int] = None):\n\n super().__init__(d_model, nhead, n_layers, dim_feedforward, dropout, activation, attention_dropout,\n test_pos_clamp, drop_expand, head_projection_size=head_projection_size)\n\n LoggingLayer.__init__(self)\n self.k = k\n self.use_norm = use_norm\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n 
pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n\n middle = self.dropout(self.activation(self.linear1(src2)))\n\n with torch.no_grad():\n if self.use_norm:\n norms = self.linear2.weight.norm(dim=0)\n vals = - middle * norms\n else:\n vals = - middle\n mask = vals > vals.kthvalue(self.k, keepdim=True)[0]\n\n self.log(\"relu_pass_rate_before\", (middle > 0).float().mean())\n\n middle = middle.masked_fill(mask, 0)\n\n self.log(\"topk_positive_rate\", (middle > 0).float().sum(-1).mean()/self.k)\n\n src2 = self.linear2(middle)\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "MoE", "path": "layers/moe_layer.py", "snippet": "class MoE(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, dmodel: int, n_experts: int, expert_size: int, n_heads: int, knn: int = 0,\n dropout: float = 0, weight_scale: float = 1.0, custom_init: int = 0,\n dropout_mode: str = \"none\", selection_mode: str = \"add\", perplexity_reg: float = 0.0,\n key_mode: str = \"moe\", half_key: bool = False, norm_keys: bool = False,\n perplexity_reg_mode: str=\"step\", n_random: int = 0, reg_type: str = \"entropy\",\n std_correction: bool = False, topk_mode: str = \"full\", activation_after_topk: bool = False,\n weight_grouping: str = \"none\", kmeans_distance: str = \"cosine\",\n activation = lambda x: F.relu(x, inplace=True), block_expert_sel_in_grad: bool = False,\n mlp_selection: bool = False, classification_target: str = \"sum\",\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False, topological_sel_reg: float = 0.0, topological_expert_reg: float = 0.0,\n gumbel_select_only: bool = False, topk_value_norm_compensation: bool = False,\n norm_expert_scores: bool = False, sel_input_cluster_init: bool = False,\n n_parallel_expert_channels: int = 0, init_norm_mode: str = \"full\", sel_bias: bool = False,\n bias: bool = False, rescale_normed: bool = False, sel_norm: str = \"none\",\n rescale_grads: bool = False, gumbel_decay: int = 0, v_dim: Optional[int] = None,\n sinkhorn_local: bool = False, sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,\n expert_size_init: bool = False, sync_distributed: bool = False,\n modulation_amplitude: float = 0.5, invisible_selection: bool = False,\n slope_multiplier: float = 1.0):\n\n super().__init__()\n self.custom_init = custom_init\n self.k_dim = dmodel\n self.v_dim = v_dim if v_dim is not None else dmodel\n self.n_experts = n_experts\n self.expert_size = expert_size\n self.size = self.n_experts * self.expert_size\n self.knn = knn\n self.dropout = dropout\n self.dropout_mode = dropout_mode\n self.selection_mode = selection_mode\n self.perplexity_reg = perplexity_reg\n self.half_key = half_key\n self.key_mode = key_mode\n self.k_vec_dim = self.k_dim // (2 if half_key else 1)\n self.n_heads = n_heads\n self.norm_keys = norm_keys\n self.perplexity_reg_mode = perplexity_reg_mode\n self.n_random = n_random\n self.reg_type = reg_type\n self.topk_mode = topk_mode\n self.activation_after_topk = activation_after_topk\n self.weight_grouping = weight_grouping\n self.kmeans_distance = kmeans_distance\n self.activation = activation\n self.block_expert_sel_in_grad = block_expert_sel_in_grad\n self.mlp_selection = mlp_selection\n self.classification_target = classification_target\n self.weight_scale = weight_scale\n self.normalize_expert_sel_init = normalize_expert_sel_init\n self.norm_key_init = norm_key_init\n self.norm_value_init = 
norm_value_init\n self.identical_init = identical_init\n self.topological_sel_reg = topological_sel_reg\n self.topological_expert_reg = topological_expert_reg\n self.gumbel_select_only = gumbel_select_only\n self.topk_value_norm_compensation = topk_value_norm_compensation\n self.norm_expert_scores = norm_expert_scores\n self.sel_input_cluster_init = sel_input_cluster_init\n self.iter = 0\n self.layer = 0\n self.initalized = False\n self.rescale_normed = rescale_normed\n self.sel_norm = sel_norm\n self.rescale_grads = rescale_grads\n self.gumbel_decay = gumbel_decay\n self.was_training = True\n self.sinkhorn_local = sinkhorn_local\n self.sinkhorn_n_iters = sinkhorn_n_iters\n self.expert_dropout = expert_dropout\n self.reg_counts = 0\n self.sync_distributed = sync_distributed and torch.distributed.is_initialized()\n self.modulation_amplitude = modulation_amplitude\n self.invisible_selection = invisible_selection\n self.slope_multiplier = slope_multiplier\n\n self.coocurence = None\n\n assert self.selection_mode in {\"add\", \"gate\", \"sigmoid\", \"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkhorn_local\", \"mul\", \"random\", \"sinkmoid2\", \"sinkmax2\", \"modulate\"}\n assert self.perplexity_reg_mode in {\"step\", \"global\", \"time\", \"global_time\"}\n assert self.dropout_mode in {\"none\", \"score\"}\n assert self.reg_type in {\"perplexity\", \"variance\", \"entropy\", \"l2\", \"switch\"}\n assert self.topk_mode in {\"full\", \"l1_approx\", \"approx\"}\n assert self.weight_grouping in {\"none\", \"keys_only\", \"keys_and_experts\"}\n assert self.classification_target in {\"sum\", \"max\"}\n assert self.sel_norm in {\"none\", \"cos\", \"input\", \"weights\"}\n\n if selection_mode in {\"mul\"} and activation_after_topk:\n raise ValueError(\"Activation after topk is not supported with mul selection\")\n\n if self.sel_norm != \"none\" and mlp_selection:\n raise ValueError(\"normalization not supported with mlp_selection\")\n\n if std_correction and self.selection_mode in {\"add\"}:\n if key_mode == \"both\":\n self.key_std_correction = math.sqrt(3)\n else:\n self.key_std_correction = math.sqrt(2)\n elif std_correction and self.selection_mode in {\"sigmoid\", \"sinkmoid\", \"sinkmoid2\"}:\n self.key_std_correction = 2.0\n else:\n self.key_std_correction = 1.0\n\n if self.key_mode in {\"moe\", \"both\"}:\n self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))\n self.get_initializer()(self.keys, std=dmodel ** -0.5 * weight_scale * self.key_std_correction)\n else:\n self.keys = None\n\n if bias:\n self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))\n self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))\n else:\n self.bias = None\n self.o_bias = None\n\n if self.key_mode in {\"shared\", \"both\"}:\n self.shared_keys = torch.nn.Parameter(torch.empty(self.k_vec_dim, self.expert_size))\n self.get_initializer()(self.shared_keys, std=dmodel ** -0.5 * weight_scale * self.key_std_correction)\n else:\n self.shared_keys = None\n\n self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))\n\n if self.mlp_selection:\n self.sel = torch.nn.Sequential(\n torch.nn.Linear(self.k_vec_dim, dmodel),\n torch.nn.ReLU(),\n torch.nn.Linear(dmodel, self.n_experts, bias=bias)\n )\n self.get_initializer()(self.sel[0].weight, std=self.k_vec_dim ** -0.5 * weight_scale * self.key_std_correction)\n self.get_initializer()(self.sel[-1].weight, std=dmodel ** 
-0.5 * weight_scale * self.key_std_correction)\n self.expert_sel = None\n else:\n self.sel = lambda x: F.linear(x, self.expert_sel, self.sel_bias)\n self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))\n self.sel_bias = torch.nn.Parameter(torch.zeros(self.n_experts)) if sel_bias else None\n\n self.get_initializer()(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_scale)\n\n if init_norm_mode == \"full\":\n real_size = self.size\n elif init_norm_mode == \"selected_experts\":\n real_size = self.expert_size * self.n_heads\n elif init_norm_mode == \"selected_channels\":\n real_size = self.knn\n elif init_norm_mode == \"expert_size\":\n real_size = self.expert_size\n else:\n raise ValueError(\"Unknown init_norm_mode\")\n\n real_size += n_parallel_expert_channels\n\n if expert_size_init:\n real_size = self.expert_size\n\n self.get_initializer()(self.values, std=real_size ** -0.5 * weight_scale)\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.sel_count_log = None\n\n self.register_buffer(\"kv_sel_counts\", torch.zeros(self.n_experts, self.expert_size), persistent=False)\n self.register_buffer(\"kv_sel_counts_100\", torch.zeros_like(self.kv_sel_counts))\n\n if self.rescale_normed and self.sel_norm != \"none\":\n self.sel_scale = torch.nn.Parameter(torch.ones([1]))\n else:\n self.sel_scale = 1.0\n\n if self.norm_expert_scores:\n self.expert_scale = torch.nn.Parameter(torch.full([1], math.sqrt(expert_size)))\n\n self.register_buffer(\"seq\", torch.arange(max(self.knn, self.n_heads, self.n_experts, self.k_dim, self.v_dim), dtype=torch.long), persistent=False)\n self.regroup_weights()\n\n def keys_to_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n k = keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n return k.permute(0, 2, 1).contiguous().view(-1, self.k_vec_dim)\n\n def keys_from_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n return keys.view(self.n_experts, self.expert_size, self.k_vec_dim).permute(0, 2, 1).contiguous().view(self.n_experts * self.k_vec_dim, self.expert_size)\n\n def init_sel(self, x: torch.Tensor):\n if not self.sel_input_cluster_init:\n return\n\n with torch.no_grad():\n from kmeans_pytorch import kmeans\n _, cluster_centers = kmeans(\n X=x, num_clusters=self.n_experts, distance=self.kmeans_distance, device=torch.device('cuda')\n )\n\n self.expert_sel.set_(cluster_centers.to(self.expert_sel.device).contiguous())\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def regroup_weights(self) -> Optional[torch.Tensor]:\n with torch.no_grad():\n\n if self.norm_key_init:\n self.renorm_keep_std(self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size), dim=1)\n\n if self.norm_value_init:\n self.renorm_keep_std(self.values, dim=1)\n\n if self.identical_init:\n k = self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n self.keys.set_(k[:1].expand_as(k).reshape_as(self.keys))\n\n v = self.values.view(self.n_experts, self.expert_size, self.v_dim)\n self.values.set_(v[:1].expand_as(v).reshape_as(self.values))\n\n ids = None\n if self.weight_grouping != \"none\":\n # self.n_experts * self.k_vec_dim, self.expert_size\n k = self.keys_to_logical_order(self.keys)\n\n from kmeans_pytorch 
import kmeans\n cluster_ids_x, cluster_centers = kmeans(\n X=k, num_clusters=self.n_experts, distance=self.kmeans_distance, device=torch.device('cuda')\n )\n\n _, ids = cluster_ids_x.sort()\n k = self.keys_from_logical_order(k[ids])\n\n self.keys.set_(k.contiguous())\n self.values.set_(self.values[ids].contiguous())\n if self.weight_grouping == \"keys_and_experts\":\n self.expert_sel.set_(cluster_centers.contiguous().to(self.expert_sel.device))\n else:\n self.get_initializer()(self.expert_sel, std=self.k_vec_dim ** -0.5 * self.weight_scale)\n\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n return ids\n\n def patch_optimizer_state(self, optimizer: torch.optim.AdamW, ids: torch.Tensor):\n if self.weight_grouping == \"none\":\n return\n\n with torch.no_grad():\n ks = optimizer.state[self.keys]\n vs = optimizer.state[self.values]\n\n for p in {\"exp_avg\", \"exp_avg_sq\"}:\n k = self.keys_to_logical_order(ks[p])\n ks[p].set_(self.keys_from_logical_order(k[ids]))\n\n vs[p].set_(vs[p][ids])\n\n es = optimizer.state[self.expert_sel]\n for p in {\"exp_avg\", \"exp_avg_sq\", 'step'}:\n es[p].zero_()\n\n def get_initializer(self):\n return torch.nn.init.normal_ if self.custom_init in {0} else utils.init.trunc_normal_\n\n def sparse_matmul(self, indices: torch.Tensor, values: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n return F.embedding_bag(indices, weight.type_as(values), per_sample_weights=values, mode=\"sum\", sparse=False)\n\n # def sparse_matmul(self, indices: torch.Tensor, values: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n # sin = torch.sparse_csr_tensor(\n # crow_indices=torch.arange(0, values.nelement() + 1, values.shape[-1], device=indices.device),\n # col_indices=indices.flatten(),\n # values=values.flatten(),\n # size=(values.shape[0], weight.shape[0])\n # )\n # return sin @ weight.type_as(values)\n\n def pre_train_forward(self):\n if self.norm_keys:\n with torch.no_grad():\n self.keys.div_(self.keys.norm(dim=-1, keepdim=True))\n\n if self.topk_value_norm_compensation:\n with torch.no_grad():\n self.value_norms = self.values.norm(2, dim=-1)\n\n def topoloss(self, x: torch.Tensor) -> torch.Tensor:\n return (F.mse_loss(x[1:], x[:-1], reduction='mean') +\n F.mse_loss(x[1:], x[:-1], reduction='mean'))\n\n def ani(self, x: torch.Tensor) -> torch.Tensor:\n assert x.ndim == 2\n chunk_size = 32\n\n xnorm = F.normalize(x, 2, dim=-1)\n\n accu = 0\n for i in range(0, x.shape[0], chunk_size):\n a = xnorm[i: i + chunk_size]\n sims = xnorm @ a.T\n sims[i : i + chunk_size].fill_diagonal_(0)\n accu += sims.sum()\n\n return accu / (x.shape[0] * (x.shape[0] - 1))\n\n def log_expert_sel_usage(self, prefix: str, channel_sel_counts: torch.Tensor):\n sel_nonzero = (channel_sel_counts != 0).type(torch.float).sum(axis=-1) / self.expert_size\n self.log(f\"{prefix}/mean\", sel_nonzero.mean())\n self.log(f\"{prefix}/min\", sel_nonzero.min())\n self.log(f\"{prefix}/max\", sel_nonzero.max())\n\n\n def post_train_forward(self):\n if self.training and self.rescale_grads:\n self.values.grad.view(self.n_experts, -1).mul_(self.rescale[:, None])\n self.keys.grad.view(self.n_experts, -1).mul_(self.rescale[:, None])\n self.expert_sel.grad.mul_(self.rescale[:, None])\n\n def pre_train_forward(self):\n if self.training and not self.was_training:\n sorted_counts = self.index_sel_counts.sort(descending=True).values\n self.log(\"test_exert_channel_usage\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n 
self.layer = 0\n if self.sel_hist:\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n self.reg_counts = 0\n\n def before_loss(self):\n if self.sel_hist:\n # Concatenate against time dimension. Important for the within-batch regularization\n sel = torch.cat(self.sel_hist, -2)\n self.add_perplexity_reg(sel)\n\n self.sel_hist = []\n\n if self.topological_sel_reg > 0:\n self.add_reg(lambda: self.topological_sel_reg * self.topoloss(self.expert_sel))\n\n if self.topological_expert_reg > 0:\n self.add_reg(lambda: self.topological_expert_reg * (\n self.topoloss(self.keys.view(self.n_experts, -1)) +\n self.topoloss(self.values.view(self.n_experts, -1))\n ))\n\n if self.rescale_grads:\n self.rescale = 1.0 / self.index_sel_counts.clamp(min=1)\n\n # json.dumps\n\n\n if self.index_sel_norm > 0:\n if self.training:\n with torch.no_grad():\n self.log(\"usag_rel_perplexity_all_layers\", utils.relative_perplexity(self.index_sel_counts / self.index_sel_norm))\n self.log(\"dead_expert_proportion_all_layers\", (self.index_sel_counts == 0).float().sum() / self.n_experts)\n\n self.log_expert_sel_usage(\"exert_channel_usage\", self.kv_sel_counts)\n\n self.kv_sel_counts_100.add_(self.kv_sel_counts)\n self.kv_sel_counts.zero_()\n\n self.index_sel_counts_100 = self.index_sel_counts_100 + self.index_sel_counts\n self.index_sel_norm_100 = self.index_sel_norm_100 + self.index_sel_norm\n\n if self.training and self.iter % 100 == 0:\n norm_cnt = self.index_sel_counts_100 / self.index_sel_norm_100\n self.log(\"usag_rel_perplexity_100\", utils.relative_perplexity(norm_cnt))\n self.log(\"dead_expert_proportion_100\", (self.index_sel_counts_100 == 0).float().sum() / self.n_experts)\n\n sorted_counts = self.index_sel_counts_100.sort(descending=True).values\n self.log(\"usage_counts_100\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n\n self.log_expert_sel_usage(\"exert_channel_usage_100\", self.kv_sel_counts_100)\n self.kv_sel_counts_100.zero_()\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.log(\"ani/keys\", self.ani(self.keys_to_logical_order(self.keys)))\n self.log(\"ani/values\", self.ani(self.values.flatten(0, -2)))\n self.log(\"ani/expert_sel\", self.ani(self.expert_sel.T))\n\n if self.training:\n self.iter += 1\n\n def topk(self, x: torch.Tensor, k: int, approx: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n if approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return scores, self.seq[:k] * x.shape[-1] + ind\n else:\n return x.topk(k, dim=-1, sorted=False)\n\n def add_perplexity_reg(self, sel: torch.Tensor):\n sync_distributed = self.sync_distributed and (self.perplexity_reg_mode not in {\"time\", \"global_time\"})\n\n def log_mean(x: torch.Tensor, dim: int = 0):\n if sync_distributed:\n xlse = framework.utils.distributed_ops.logsumexp(x, dim=dim)\n\n # Normalize\n n = torch.tensor(x.shape[dim]).to(x.device)\n torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)\n return xlse - n.log()\n else:\n return x.logsumexp(dim) - math.log(x.shape[dim])\n\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n sel = sel.flatten(0, -3)\n else:\n sel = sel.flatten(0, -2)\n\n # Note: sel are raw logits, no matter what activation is used\n if self.perplexity_reg > 0:\n if self.reg_type == \"perplexity\":\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = log_mean(sel_d, -2)\n loss = lambda: self.perplexity_reg * ( - utils.relative_perplexity_l(sel_d).mean())\n elif self.reg_type 
== \"entropy\":\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = log_mean(sel_d, -2)\n loss = lambda: self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n elif self.reg_type == \"variance\":\n if sync_distributed:\n raise NotImplementedError(\"Variance regularization is not supported in distributed mode\")\n avg_sel = sel.mean(-2)\n loss = lambda: self.perplexity_reg * avg_sel.var(-1).mean()\n elif self.reg_type == \"l2\":\n loss = lambda: self.perplexity_reg * sel.pow(2).mean()\n elif self.reg_type == \"switch\":\n if sync_distributed:\n torch.distributed.all_reduce(self.reg_counts, op=torch.distributed.ReduceOp.SUM)\n\n p_sel_real = self.reg_counts / self.reg_counts.sum(-1, keepdims=True)\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n p_sel_real = p_sel_real.unsqueeze(-2)\n\n loss = lambda: self.perplexity_reg * (F.softmax(sel, dim=-1) * p_sel_real).mean()\n self.reg_counts = 0\n else:\n assert False\n\n self.add_reg(loss, \"moe\")\n\n def compute_scores(self, input: torch.Tensor, index: CVMMSel, expert_scores: torch.Tensor, shared_score: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.keys is not None:\n # scores = self.sparse_matmul(\n # (self.seq[:input.shape[-1]] + index[:, None] * (self.k_dim // (2 if self.half_key else 1))),\n # input,\n # self.keys\n # )\n scores = cvmm(input, index, self.keys)\n if self.shared_keys is not None:\n scores = scores + shared_score\n else:\n scores = shared_score\n\n if self.bias is not None:\n scores = scores + self.bias[index.raw_sel]\n\n if self.invisible_selection:\n unmodulated_scores = scores\n scores = scores.detach()\n\n if self.selection_mode in {\"add\"}:\n with torch.no_grad():\n self.log(\"expert_key_positive_rate\", (scores > 0).type_as(scores).mean())\n scores = scores + expert_scores[..., None]\n elif self.selection_mode in {\"mul\"}:\n scores = scores * expert_scores[..., None]\n elif self.selection_mode in {\"gate\", \"sigmoid\", \"gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"random\", \"modulate\", \"sinkmoid2\"}:\n # Handle it later\n pass\n elif self.selection_mode == \"hard_gumbel\":\n s = (torch.ones_like(expert_scores) - expert_scores).detach() + expert_scores\n scores = scores * s[..., None]\n\n if self.invisible_selection and scores is not unmodulated_scores:\n scores = unmodulated_scores + scores - scores.detach()\n\n scores = self.activation(scores)\n\n if self.norm_expert_scores:\n scores = F.normalize(scores, 1, dim=-1) * self.expert_scale\n\n if self.selection_mode in {\"gate\", \"sigmoid\", \"gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"modulate\", \"sinkmoid2\"}:\n if self.invisible_selection:\n unmodulated_scores = scores\n scores = scores.detach()\n scores = scores * expert_scores[..., None]\n if self.invisible_selection:\n scores = unmodulated_scores + scores - scores.detach()\n\n if self.train and self.iter % 10 == 0:\n with torch.no_grad():\n gt0 = (scores > 0).float()\n gt0_s = gt0.sum()\n if self.selection_mode in {\"add\"}:\n self.log(\"k1_vs_k2_magnitude\", (scores / expert_scores[..., None]).sum() / gt0_s - 1)\n\n self.log(\"relu_pass_rate\", gt0_s / scores.numel())\n\n self.kv_sel_counts.index_add_(0, index.raw_sel.flatten(), gt0.flatten(end_dim=-2))\n\n\n # elif self.selection_mode in {\"predict_rank\"}:\n # self.add_reg(lambda: self.rank_loss(expert_scores, scores.detach().sum(-1)))\n\n if self.dropout > 0 and self.dropout_mode != \"none\":\n scores = F.dropout(scores, 
self.dropout, training=self.training)\n\n # indices = torch.arange(0, scores.shape[-1], device=input.device) + index[:, None] * self.expert_size\n return scores\n\n def sel_activation(self, sel: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n reg_sel = sel\n if self.selection_mode in {\"gumbel\", \"hard_gumbel\"}:\n if self.training:\n sel = F.gumbel_softmax(sel)\n else:\n sel = F.softmax(sel)\n elif self.selection_mode == \"gumbel_sigmoid\":\n if self.training and (self.gumbel_decay == 0 or self.gumbel_decay > self.iter):\n noise = gumbel_sigmoid_noise(sel)\n if self.gumbel_decay:\n noise = noise * (1 - self.iter / self.gumbel_decay)\n sel = sel + noise\n else:\n sel = F.sigmoid(sel)\n elif self.selection_mode in {\"sinkhorn\", \"sinkmoid\", \"sinkmax\"}:\n if self.training:\n if self.sinkhorn_local:\n sel = sel.view(-1, seq_len, sel.shape[-1])\n\n for _ in range(self.sinkhorn_n_iters):\n if self.sinkhorn_local or (not self.sync_distributed):\n sel = sel - torch.logsumexp(sel, -2, keepdim=True)\n else:\n sel = sel - framework.utils.distributed_ops.logsumexp(sel, -2, keepdim=True)\n\n sel = sel - torch.logsumexp(sel, -1, keepdim=True)\n reg_sel = sel\n\n if self.sinkhorn_local:\n sel = sel.flatten(end_dim=-2).exp()\n\n sel = sel.exp()\n elif self.selection_mode == \"sinkmoid\":\n sel = F.sigmoid(sel)\n else:\n sel = F.softmax(sel, dim=-1)\n elif self.selection_mode in {\"sinkhorn2\", \"sinkmoid2\", \"sinkmax2\"}:\n if self.training:\n sel = self.sinkhorn(sel, self.selection_mode != \"sinkmoid2\")\n elif self.selection_mode == \"sinkmoid\":\n sel = F.sigmoid(sel)\n else:\n sel = F.softmax(sel, dim=-1)\n elif self.selection_mode in {\"sigmoid\"}:\n sel = torch.sigmoid(sel)\n elif self.selection_mode in {\"modulate\"}:\n sel = torch.tanh(sel) * (self.modulation_amplitude / 0.5) + 1\n elif self.selection_mode in {\"add\"}:\n sel = sel\n elif self.selection_mode in {\"mul\"}:\n sel = sel.abs()\n reg_sel = sel\n elif self.selection_mode in {\"gate\"}:\n sel = F.softmax(sel, dim=-1)\n with torch.no_grad():\n self.log(\"expert_rel_perplexity_per_selection\", utils.relative_perplexity(sel).mean())\n else:\n assert False\n\n return sel, reg_sel\n\n def sinkhorn(self, x: torch.Tensor, normalize:bool = True) -> torch.Tensor:\n # Based on\n A, B = x.shape[-2:]\n\n a = torch.zeros_like(x[..., 0, :])\n b = torch.zeros_like(x[..., 0])\n\n for _ in range(self.sinkhorn_n_iters):\n b = math.log(A) - (x - a[..., None, :]).logsumexp(-1)\n if self.sync_distributed:\n a = math.log(B) - framework.utils.distributed_ops.logsumexp(x - b[..., None], -2)\n else:\n a = math.log(B) - (x - b[..., None]).logsumexp(-2)\n\n r = (a[..., None, :] + b[..., None] + x).exp()\n\n if normalize and self.sync_distributed:\n A = torch.tensor(A, device=x.device)\n A = torch.distributed.reduce_all(A, op=torch.distributed.ReduceOp.SUM)\n A = A.item()\n return (r / (A * B)) if normalize else r\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n if not self.initalized:\n self.init_sel(input)\n self.initalized = True\n\n out = 0\n\n if self.half_key:\n in1 = input[..., :self.k_dim // 2]\n in2 = input[..., self.k_dim // 2:]\n else:\n in1 = in2 = input\n\n if self.selection_mode != \"random\":\n if self.block_expert_sel_in_grad:\n in1 = in1.detach()\n\n sel = self.sel(in1) * self.slope_multiplier\n\n if self.sel_norm == \"cos\":\n sel = sel / (in1.norm(dim=-1, keepdim=True) * self.expert_sel.norm(dim=-1)[None]) * self.sel_scale\n elif self.sel_norm == \"weights\":\n sel = sel * (self.sel_scale / 
self.expert_sel.norm(dim=-1)[None])\n elif self.sel_norm == \"input\":\n sel = sel * (self.sel_scale / in1.norm(dim=-1, keepdim=True))\n\n sel_raw = reg_sel = sel\n\n inv_val = float(\"-inf\")\n\n if (not self.activation_after_topk) or self.selection_mode in {\"sinkhorn\", \"sinkhorn2\", \"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"sinkmoid\", \"sinkmax\", \"mul\", \"sinkmoid2\"}:\n # Sinkhorn should be always applied before top-k\n sel, reg_sel = self.sel_activation(sel, input.shape[-2])\n if self.selection_mode not in {\"sinkmoid\", \"sinkmoid2\"}:\n inv_val = 0\n\n if self.training and self.expert_dropout > 0:\n if self.selection_mode not in {\"sigmoid\", \"modulate\", \"gate\", \"sinkmoid\", \"sinkmoid2\"}:\n raise ValueError(\"Expert dropout not supported in this mode\")\n\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, inv_val)\n else:\n sel2 = sel\n\n sel_val, sel_index = self.topk(sel2, self.n_heads, self.topk_mode in {\"l1_approx\", \"approx\"})\n\n if self.activation_after_topk or (self.selection_mode in {\"sinkmoid\", \"sinkmax\", \"mul\", \"sinkmoid2\"}) or (self.gumbel_select_only and self.selection_mode in {\"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"gumbel_sigmoid\", \"sinkmax\"}):\n sel_val = torch.gather(sel_raw, -1, sel_index)\n if self.selection_mode in {\"gumbel_sigmoid\", \"sinkmoid\", \"sinkmoid2\"}:\n sel_val = torch.sigmoid(sel_val)\n elif self.selection_mode in {\"sinkhorn\", \"sinkhorn2\"}:\n # In case of sinkhorn, simulate the effect of post-topk activation by renormalizing\n sel_val = F.normalize(sel_val, p=1, dim=-1)\n else:\n sel_val, reg_sel = self.sel_activation(sel_val, input.shape[-2])\n else:\n sel_index = torch.randint(0, self.n_experts, (*input.shape[:-1], self.n_heads), device=input.device)\n sel_val = torch.ones_like(sel_index, dtype=input.dtype, device=input.device)\n reg_sel = None\n\n\n record_counts_now = (self.training and self.iter % 10 == 0) or (not self.training)\n\n if not self.training:\n sel_index_flat = sel_index.flatten(end_dim=-2)\n if self.coocurence is None:\n self.coocurence = torch.zeros([self.n_experts, self.n_experts], device=sel_index_flat.device, dtype=torch.long)\n\n for h1 in range(self.n_heads):\n for h2 in range(self.n_heads):\n ind_flat = sel_index_flat[..., h1] * self.n_experts + sel_index_flat[..., h2]\n values = torch.tensor([1], device=self.coocurence.device, dtype=self.coocurence.dtype).expand_as(ind_flat)\n # values = sel_val[..., h2].flatten()\n self.coocurence.flatten().put_(ind_flat, values, accumulate=True)\n # self.coocurence[sel_index_flat[..., h1], sel_index_flat[..., h2]] += 1\n\n if record_counts_now or self.reg_type == \"switch\":\n reg_counts = F.one_hot(sel_index, self.n_experts).type_as(input)\n\n if self.reg_type == \"switch\":\n reg_counts2 = reg_counts.view(*input.shape[:-2], input.shape[-2] * self.n_heads, self.n_experts)\n if self.perplexity_reg_mode == \"time\":\n reg_counts2 = reg_counts2.sum(-2)\n else:\n reg_counts2 = reg_counts2.flatten(end_dim=-2).sum(0)\n\n self.reg_counts = self.reg_counts + reg_counts2\n\n if record_counts_now:\n with torch.no_grad():\n sel_counts = reg_counts.flatten(end_dim=-2).sum(0)\n cnt = sel_index.nelement()\n\n p_expert_sel = sel_counts / cnt\n\n self.index_sel_counts = self.index_sel_counts + sel_counts\n self.index_sel_norm = self.index_sel_norm + cnt\n\n if self.training:\n self.log(\"min_sel_score\", sel_val.min(dim=-1).values.mean())\n self.log(\"max_sel_score\", sel_val.max(dim=-1).values.mean())\n\n sel_oh = 
F.one_hot(sel_index, self.n_experts).sum(-2).bool()\n if self.layer >= 1 and self.training:\n self.log(f\"layer_sel_overlap_{self.layer}\", ((self.prev_sel_oh & sel_oh).sum(-1).float() / self.n_heads).mean())\n\n self.prev_sel_oh = sel_oh\n\n ppl = utils.relative_perplexity(p_expert_sel)\n self.log(\"usage_rel_perplexity\", ppl)\n self.log(\"dead_expert_proportion\", (p_expert_sel == 0).float().sum() / self.n_experts)\n\n if self.perplexity_reg_mode in {\"step\", \"time\"}:\n self.add_perplexity_reg(reg_sel)\n elif self.perplexity_reg > 0 and self.training:\n self.sel_hist.append(reg_sel)\n\n shared_score = (in2 @ self.shared_keys) if self.shared_keys is not None else None\n\n scores_l = []\n\n sel_indices = [cvmm_prepare_sel(sel_index[..., h].int(), self.n_experts) for h in range(sel_index.shape[-1])]\n\n for h in range(sel_index.shape[-1]):\n hi = sel_indices[h]\n\n scores = self.compute_scores(in2, hi, sel_val[..., h], shared_score)\n scores_l.append(scores)\n\n if self.knn > 0 or self.selection_mode == \"classify\":\n with torch.no_grad():\n scores = torch.cat(scores_l, -1)\n\n if self.knn > 0:\n with torch.no_grad():\n tresh = scores.kthvalue(scores.shape[-1] - self.knn, -1).values\n\n scores_l = [s.masked_fill_(s < tresh[:, None], 0) for s in scores_l]\n\n out = 0\n for (hi, scores) in zip(sel_indices, scores_l):\n out = out + cvmm(scores, hi, self.values)\n\n # indices = torch.cat(ind_l, dim=-1)\n # scores = torch.cat(scores_l, dim=-1)\n\n if self.selection_mode == \"classify\":\n self.add_reg(lambda: self.cls_loss(sel_val, scores))\n\n # if self.knn > 0:\n # if self.topk_value_norm_compensation:\n # norms = self.value_norms[None].expand(indices.shape[0], -1).gather(-1, indices)\n # scores2 = scores * norms\n # _, ind2 = self.topk(scores2, self.knn, self.topk_mode == \"approx\")\n # indices = indices.gather(-1, ind2)\n # scores = scores.gather(-1, ind2)\n # else:\n # scores, ind2 = self.topk(scores, self.knn, self.topk_mode == \"approx\")\n # indices = indices.gather(-1, ind2)\n\n # if self.n_random > 0 and self.selection_mode not in {\"predict\", \"classify\"}:\n # with torch.no_grad():\n # rind = torch.arange(0, self.n_experts, device=input.device)\n # rind = torch.masked_select(rind, ~F.one_hot(sel_index, self.n_experts).sum(-2).bool()).view(in_flat.shape[0],-1)\n # rind = rind.gather(-1, torch.randint(0, rind.shape[-1], size=[*rind.shape[:-1], self.n_random], device=rind.device))\n\n # ind_l = [indices]\n # scores_l = [scores]\n # for i in range(self.n_random):\n # hi = rind[..., i]\n # indices, scores = self.compute_scores(in2, hi, sel.gather(-1, hi[:, None]).squeeze(), shared_score)\n\n # ind_l.append(indices)\n # scores_l.append(scores)\n\n # indices = torch.cat(ind_l, dim=-1)\n # scores = torch.cat(scores_l, dim=-1)\n\n # out = self.sparse_matmul(indices, scores, self.values)\n\n self.layer += 1\n\n self.was_training = self.training\n res = out.view(*input.shape[:-1], self.v_dim)\n if self.o_bias is not None:\n res = res + self.o_bias\n return res\n\n def dump_logs(self, save_dir: str):\n if self.coocurence is not None:\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.coocurence, os.path.join(save_dir, \"coocurence.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if self.coocurence is not None:\n coo = self.coocurence / self.coocurence.diagonal().clamp(min=1)[:, None]\n res[\"expert_coocurence\"] = framework.visualize.plot.Heatmap(coo, xlabel=\"expert\", ylabel=\"expert\", textval=False)\n self.coocurence = None\n return res" }, { 
"identifier": "Result", "path": "interfaces/result.py", "snippet": "class Result:\n outputs: torch.Tensor\n loss: torch.Tensor\n\n batch_dim = 0\n\n def plot(self) -> Dict[str, Any]:\n return {}\n\n @property\n def batch_size(self) -> int:\n return self.outputs.shape[self.batch_dim]\n\n @staticmethod\n def merge(l: List, batch_weights: Optional[List[float]] = None):\n if len(l) == 1:\n return l[0]\n batch_weights = batch_weights if batch_weights is not None else [1] * len(l)\n loss = sum([r.loss * w for r, w in zip(l, batch_weights)]) / sum(batch_weights)\n out = torch.stack([r.outputs for r in l], l[0].batch_dim)\n return l[0].__class__(out, loss)" } ]
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_preln_kvmem_transformer import PrelnRelativeKVMemTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.topk_transformer import TopkTransformer from layers.moe_layer import MoE from interfaces import Result
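MoE.sinkhorn above alternates row and column normalization in log space. A non-distributed sketch of the same iteration, assuming the normalize=True path and omitting the all-reduce branch:

import math
import torch

def sinkhorn(x: torch.Tensor, n_iters: int = 3) -> torch.Tensor:
    # x: (..., A, B) logits; returns an approximately doubly stochastic
    # matrix scaled by 1 / (A * B), matching the original's normalized path.
    A, B = x.shape[-2:]
    a = torch.zeros_like(x[..., 0, :])  # potentials over the B axis
    b = torch.zeros_like(x[..., 0])     # potentials over the A axis
    for _ in range(n_iters):
        b = math.log(A) - (x - a[..., None, :]).logsumexp(-1)
        a = math.log(B) - (x - b[..., None]).logsumexp(-2)
    return (a[..., None, :] + b[..., None] + x).exp() / (A * B)

balanced = sinkhorn(torch.randn(5, 4, 3))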
18710
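add_perplexity_reg above penalizes a collapsed router: it averages per-token log-softmax scores over the batch and rewards high entropy of the resulting expert distribution. A sketch that substitutes plain entropy for the project's relative_perplexity_l helper, which is not shown in this record:

import math
import torch
import torch.nn.functional as F

def perplexity_reg(sel_logits: torch.Tensor, coeff: float) -> torch.Tensor:
    # sel_logits: (tokens, n_experts) raw router logits.
    log_p = F.log_softmax(sel_logits, dim=-1)
    # Log of the batch-mean distribution, computed stably in log space,
    # like the snippet's log_mean helper.
    log_mean = log_p.logsumexp(0) - math.log(log_p.shape[0])
    entropy = -(log_mean.exp() * log_mean).sum()
    # Maximize entropy by minimizing its negation, as the loss lambda does.
    return coeff * (-entropy)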
parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) 
parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_keys", default="128", parser=parser.int_list_parser) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-pkm.knn", default=32) parser.add_argument("-pkm.stochastic", default=False) parser.add_argument("-pkm.query_batchnorm", default=False) parser.add_argument("-pkm.custom_init", default=0) parser.add_argument("-pkm.slice_values", default=False) parser.add_argument("-pkm.slice_proj", default=False) parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) 
parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif 
self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}:
mklayer = lambda: TopkTransformer(
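The truncated code above ends exactly where the single line just shown is expected to continue it. A hypothetical illustration of how such a record could be scored; the names below are assumptions, not part of the dataset:

def next_line_exact_match(prediction: str, target: str) -> bool:
    # Whitespace-insensitive exact match between model output and target.
    return prediction.strip() == target.strip()

assert next_line_exact_match(
    "        mklayer = lambda: TopkTransformer(",
    "mklayer = lambda: TopkTransformer(")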
7
2023-10-16 11:26:45+00:00
24k
boppreh/hello_tls
src/hello_tls/scan.py
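The context snippets that follow define this record's protocol machinery, including byte-valued enums for alerts, cipher suites, and groups. As a quick illustration of the lookup pattern they rely on, here is a toy three-member subset of the CipherSuite enum below, resolved by Enum value lookup (the real enum also annotates each member with its supported protocols):

from enum import Enum

class CipherSuite(Enum):
    # Byte IDs copied from the TLS 1.3 entries in the snippet below.
    TLS_AES_128_GCM_SHA256 = b"\x13\x01"
    TLS_AES_256_GCM_SHA384 = b"\x13\x02"
    TLS_CHACHA20_POLY1305_SHA256 = b"\x13\x03"

assert CipherSuite(b"\x13\x02") is CipherSuite.TLS_AES_256_GCM_SHA384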
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]:\n def read_next(length: int) -> bytes:\ndef _bytes_to_int(b: bytes) -> int:\ndef parse_server_hello(packets: Iterable[bytes]) -> ServerHello:\ndef make_client_hello(client_hello: ClientHello) -> bytes:\n def prefix_length(block_name: str, width_bytes: int = 2) -> Iterator[None]:" }, { "identifier": "AlertDescription", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertDescription(Enum):\n \"\"\" Different alert messages that can be sent by the server. \"\"\"\n close_notify = b'\\x00'\n unexpected_message = b'\\x0a'\n bad_record_mac = b'\\x14'\n record_overflow = b'\\x16'\n handshake_failure = b'\\x28'\n bad_certificate = b'\\x2a'\n unsupported_certificate = b'\\x2b'\n certificate_revoked = b'\\x2c'\n certificate_expired = b'\\x2d'\n certificate_unknown = b'\\x2e'\n illegal_parameter = b'\\x2f'\n unknown_ca = b'\\x30'\n access_denied = b'\\x31'\n decode_error = b'\\x32'\n decrypt_error = b'\\x33'\n protocol_version = b'\\x46'\n insufficient_security = b'\\x47'\n internal_error = b'\\x50'\n inappropriate_fallback = b'\\x56'\n user_canceled = b'\\x5a'\n missing_extension = b'\\x6d'\n unsupported_extension = b'\\x6e'\n unrecognized_name = b'\\x70'\n bad_certificate_status_response = b'\\x71'\n unknown_psk_identity = b'\\x73'\n certificate_required = b'\\x74'\n no_application_protocol = b'\\x78'" }, { "identifier": "CipherSuite", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CipherSuite(Enum):\n def __repr__(self):\n return self.name\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each cipher suite with the protocols it's supported at.\n # Default to all but TLS 1.3, because that's the most common.\n def __init__(self, _: bytes, protocols: Sequence[Protocol] = (Protocol.SSLv3, Protocol.TLS1_0, Protocol.TLS1_1, Protocol.TLS1_2)):\n self.protocols = protocols\n\n # Pseudo cipher suite, not actually picked.\n #TLS_EMPTY_RENEGOTIATION_INFO_SCSV = b\"\\x00\\xff\"\n\n # TLS 1.3 cipher suites.\n TLS_AES_128_GCM_SHA256 = b\"\\x13\\x01\", (Protocol.TLS1_3,)\n TLS_AES_256_GCM_SHA384 = b\"\\x13\\x02\", (Protocol.TLS1_3,)\n TLS_CHACHA20_POLY1305_SHA256 = b\"\\x13\\x03\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_SHA256 = b\"\\x13\\x04\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_8_SHA256 = b\"\\x13\\x05\", (Protocol.TLS1_3,)\n\n # Cipher suite that had its number reassigned.\n OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xcc\\x13'\n \n # Cipher suites adapted from IANA assignments:\n # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4\n TLS_AEGIS_128L_SHA256 = b'\\x13\\x07' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_AEGIS_256_SHA384 = b'\\x13\\x06' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x19' # [RFC4346]\n TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x17' # [RFC4346][RFC6347]\n TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1B' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA = b'\\x00\\x34' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA256 = b'\\x00\\x6C' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA6' # [RFC5288]\n 
TLS_DH_anon_WITH_AES_256_CBC_SHA = b'\\x00\\x3A' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6D' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA7' # [RFC5288]\n TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x46' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5A' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x47' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5B' # [RFC6209]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x46' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBF' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x84' # [RFC6367]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x89' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC5' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x85' # [RFC6367]\n TLS_DH_anon_WITH_DES_CBC_SHA = b'\\x00\\x1A' # [RFC8996]\n TLS_DH_anon_WITH_RC4_128_MD5 = b'\\x00\\x18' # [RFC5246][RFC6347]\n TLS_DH_anon_WITH_SEED_CBC_SHA = b'\\x00\\x9B' # [RFC4162]\n TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0B' # [RFC4346]\n TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0D' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x30' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3E' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA4' # [RFC5288]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x36' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x68' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA5' # [RFC5288]\n TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3E' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x58' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3F' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x59' # [RFC6209]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x42' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBB' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x82' # [RFC6367]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x85' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC1' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x83' # [RFC6367]\n TLS_DH_DSS_WITH_DES_CBC_SHA = b'\\x00\\x0C' # [RFC8996]\n TLS_DH_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x97' # [RFC4162]\n TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0E' # [RFC4346]\n TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x10' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x31' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3F' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA0' # [RFC5288]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x37' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x69' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA1' # [RFC5288]\n TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x40' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x54' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x41' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x55' # [RFC6209]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x43' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBC' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7E' # [RFC6367]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x86' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC2' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 
b'\\xC0\\x7F' # [RFC6367]\n TLS_DH_RSA_WITH_DES_CBC_SHA = b'\\x00\\x0F' # [RFC8996]\n TLS_DH_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x98' # [RFC4162]\n TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x11' # [RFC4346]\n TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x13' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x32' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x40' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA2' # [RFC5288]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x38' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6A' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA3' # [RFC5288]\n TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x42' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x56' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x43' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x57' # [RFC6209]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x44' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBD' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x80' # [RFC6367]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x87' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC3' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x81' # [RFC6367]\n TLS_DHE_DSS_WITH_DES_CBC_SHA = b'\\x00\\x12' # [RFC8996]\n TLS_DHE_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x99' # [RFC4162]\n TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8F' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x90' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB2' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_128_CCM = b'\\xC0\\xA6' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAA' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x91' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB3' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CCM = b'\\xC0\\xA7' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAB' # [RFC5487]\n TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x66' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6C' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x67' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6D' # [RFC6209]\n TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x96' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x90' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x97' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x91' # [RFC6367]\n TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAD' # [RFC7905]\n TLS_DHE_PSK_WITH_NULL_SHA = b'\\x00\\x2D' # [RFC4785]\n TLS_DHE_PSK_WITH_NULL_SHA256 = b'\\x00\\xB4' # [RFC5487]\n TLS_DHE_PSK_WITH_NULL_SHA384 = b'\\x00\\xB5' # [RFC5487]\n TLS_DHE_PSK_WITH_RC4_128_SHA = b'\\x00\\x8E' # [RFC4279][RFC6347]\n TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x14' # [RFC4346]\n TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x16' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x33' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x67' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CCM = b'\\xC0\\x9E' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA2' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9E' # [RFC5288]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x39' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6B' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CCM = b'\\xC0\\x9F' # [RFC6655]\n 
TLS_DHE_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA3' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9F' # [RFC5288]\n TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x44' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x52' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x45' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x53' # [RFC6209]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x45' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBE' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7C' # [RFC6367]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x88' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC4' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7D' # [RFC6367]\n TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAA' # [RFC7905]\n TLS_DHE_RSA_WITH_DES_CBC_SHA = b'\\x00\\x15' # [RFC8996]\n TLS_DHE_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x9A' # [RFC4162]\n TLS_ECCPWD_WITH_AES_128_CCM_SHA256 = b'\\xC0\\xB2' # [RFC8492]\n TLS_ECCPWD_WITH_AES_128_GCM_SHA256 = b'\\xC0\\xB0' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_CCM_SHA384 = b'\\xC0\\xB3' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_GCM_SHA384 = b'\\xC0\\xB1' # [RFC8492]\n TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x17' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_128_CBC_SHA = b'\\xC0\\x18' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_256_CBC_SHA = b'\\xC0\\x19' # [RFC8422]\n TLS_ECDH_anon_WITH_NULL_SHA = b'\\xC0\\x15' # [RFC8422]\n TLS_ECDH_anon_WITH_RC4_128_SHA = b'\\xC0\\x16' # [RFC8422][RFC6347]\n TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x03' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x04' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x25' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2D' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x05' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x26' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2E' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4A' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5E' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4B' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5F' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x74' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x88' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x75' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x89' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_NULL_SHA = b'\\xC0\\x01' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x02' # [RFC8422][RFC6347]\n TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x0D' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x0E' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x29' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x31' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0F' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x2A' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x32' # [RFC5289]\n TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4E' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x62' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4F' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x63' # [RFC6209]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 
b'\\xC0\\x78' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8C' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x79' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8D' # [RFC6367]\n TLS_ECDH_RSA_WITH_NULL_SHA = b'\\xC0\\x0B' # [RFC8422]\n TLS_ECDH_RSA_WITH_RC4_128_SHA = b'\\xC0\\x0C' # [RFC8422][RFC6347]\n TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x08' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x09' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x23' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM = b'\\xC0\\xAC' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = b'\\xC0\\xAE' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2B' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0A' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x24' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM = b'\\xC0\\xAD' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = b'\\xC0\\xAF' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2C' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x48' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5C' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x49' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5D' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x72' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x86' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x73' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x87' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA9' # [RFC7905]\n TLS_ECDHE_ECDSA_WITH_NULL_SHA = b'\\xC0\\x06' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x07' # [RFC8422][RFC6347]\n TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x34' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = b'\\xC0\\x35' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x37' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = b'\\xD0\\x03' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = b'\\xD0\\x05' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\xD0\\x01' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = b'\\xC0\\x36' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x38' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\xD0\\x02' # [RFC8442]\n TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x70' # [RFC6209]\n TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x71' # [RFC6209]\n TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x9A' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x9B' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAC' # [RFC7905]\n TLS_ECDHE_PSK_WITH_NULL_SHA = b'\\xC0\\x39' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA256 = b'\\xC0\\x3A' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA384 = b'\\xC0\\x3B' # [RFC5489]\n TLS_ECDHE_PSK_WITH_RC4_128_SHA = b'\\xC0\\x33' # [RFC5489][RFC6347]\n TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x12' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x13' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x27' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2F' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x14' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x28' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 
b'\\xC0\\x30' # [RFC5289]\n TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4C' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x60' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4D' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x61' # [RFC6209]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x76' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8A' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x77' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8B' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA8' # [RFC7905]\n TLS_ECDHE_RSA_WITH_NULL_SHA = b'\\xC0\\x10' # [RFC8422]\n TLS_ECDHE_RSA_WITH_RC4_128_SHA = b'\\xC0\\x11' # [RFC8422][RFC6347]\n TLS_GOSTR341112_256_WITH_28147_CNT_IMIT = b'\\xC1\\x02' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_CTR_OMAC = b'\\xC1\\x00' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_L = b'\\xC1\\x03' # [RFC9367]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_S = b'\\xC1\\x05' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_CTR_OMAC = b'\\xC1\\x01' # [RFC9189]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_L = b'\\xC1\\x04' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_S = b'\\xC1\\x06' # [RFC9367]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = b'\\x00\\x29' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = b'\\x00\\x26' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x2A' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = b'\\x00\\x27' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x2B' # [RFC2712][RFC6347]\n TLS_KRB5_EXPORT_WITH_RC4_40_SHA = b'\\x00\\x28' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = b'\\x00\\x23' # [RFC2712]\n TLS_KRB5_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1F' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_MD5 = b'\\x00\\x22' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_SHA = b'\\x00\\x1E' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_MD5 = b'\\x00\\x25' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_SHA = b'\\x00\\x21' # [RFC2712]\n TLS_KRB5_WITH_RC4_128_MD5 = b'\\x00\\x24' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_RC4_128_SHA = b'\\x00\\x20' # [RFC2712][RFC6347]\n TLS_NULL_WITH_NULL_NULL = b'\\x00\\x00' # [RFC5246]\n TLS_PSK_DHE_WITH_AES_128_CCM_8 = b'\\xC0\\xAA' # [RFC6655]\n TLS_PSK_DHE_WITH_AES_256_CCM_8 = b'\\xC0\\xAB' # [RFC6655]\n TLS_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8B' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x8C' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xAE' # [RFC5487]\n TLS_PSK_WITH_AES_128_CCM = b'\\xC0\\xA4' # [RFC6655]\n TLS_PSK_WITH_AES_128_CCM_8 = b'\\xC0\\xA8' # [RFC6655]\n TLS_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA8' # [RFC5487]\n TLS_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x8D' # [RFC4279]\n TLS_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xAF' # [RFC5487]\n TLS_PSK_WITH_AES_256_CCM = b'\\xC0\\xA5' # [RFC6655]\n TLS_PSK_WITH_AES_256_CCM_8 = b'\\xC0\\xA9' # [RFC6655]\n TLS_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA9' # [RFC5487]\n TLS_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x64' # [RFC6209]\n TLS_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6A' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x65' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6B' # [RFC6209]\n TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x94' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8E' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x95' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8F' # [RFC6367]\n 
TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAB' # [RFC7905]\n TLS_PSK_WITH_NULL_SHA = b'\\x00\\x2C' # [RFC4785]\n TLS_PSK_WITH_NULL_SHA256 = b'\\x00\\xB0' # [RFC5487]\n TLS_PSK_WITH_NULL_SHA384 = b'\\x00\\xB1' # [RFC5487]\n TLS_PSK_WITH_RC4_128_SHA = b'\\x00\\x8A' # [RFC4279][RFC6347]\n TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x08' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x06' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x03' # [RFC4346][RFC6347]\n TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x93' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x94' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB6' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAC' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x95' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB7' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAD' # [RFC5487]\n TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x68' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6E' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x69' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6F' # [RFC6209]\n TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x98' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x92' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x99' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x93' # [RFC6367]\n TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAE' # [RFC7905]\n TLS_RSA_PSK_WITH_NULL_SHA = b'\\x00\\x2E' # [RFC4785]\n TLS_RSA_PSK_WITH_NULL_SHA256 = b'\\x00\\xB8' # [RFC5487]\n TLS_RSA_PSK_WITH_NULL_SHA384 = b'\\x00\\xB9' # [RFC5487]\n TLS_RSA_PSK_WITH_RC4_128_SHA = b'\\x00\\x92' # [RFC4279][RFC6347]\n TLS_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0A' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x2F' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3C' # [RFC5246]\n TLS_RSA_WITH_AES_128_CCM = b'\\xC0\\x9C' # [RFC6655]\n TLS_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA0' # [RFC6655]\n TLS_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9C' # [RFC5288]\n TLS_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x35' # [RFC5246]\n TLS_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x3D' # [RFC5246]\n TLS_RSA_WITH_AES_256_CCM = b'\\xC0\\x9D' # [RFC6655]\n TLS_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA1' # [RFC6655]\n TLS_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9D' # [RFC5288]\n TLS_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3C' # [RFC6209]\n TLS_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x50' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3D' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x51' # [RFC6209]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x41' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBA' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7A' # [RFC6367]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x84' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC0' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7B' # [RFC6367]\n TLS_RSA_WITH_DES_CBC_SHA = b'\\x00\\x09' # [RFC8996]\n TLS_RSA_WITH_IDEA_CBC_SHA = b'\\x00\\x07' # [RFC8996]\n TLS_RSA_WITH_NULL_MD5 = b'\\x00\\x01' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA = b'\\x00\\x02' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA256 = b'\\x00\\x3B' # [RFC5246]\n TLS_RSA_WITH_RC4_128_MD5 = b'\\x00\\x04' # [RFC5246][RFC6347]\n TLS_RSA_WITH_RC4_128_SHA = b'\\x00\\x05' # [RFC5246][RFC6347]\n TLS_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x96' # 
[RFC4162]\n TLS_SHA256_SHA256 = b'\\xC0\\xB4' # [RFC9150]\n TLS_SHA384_SHA384 = b'\\xC0\\xB5' # [RFC9150]\n TLS_SM4_CCM_SM3 = b'\\x00\\xC7' # [RFC8998]\n TLS_SM4_GCM_SM3 = b'\\x00\\xC6' # [RFC8998]\n TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1C' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = b'\\xC0\\x1F' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = b'\\xC0\\x22' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1B' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1E' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x21' # [RFC5054]\n TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1A' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1D' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_256_CBC_SHA = b'\\xC0\\x20' # [RFC5054]" }, { "identifier": "Group", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Group(Enum):\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each group with whether it's a PQ group.\n def __init__(self, _: bytes, is_pq: bool = False):\n self.is_pq = is_pq\n def __repr__(self):\n return self.name\n \n sect163k1 = b'\\x00\\x01'\n sect163r1 = b'\\x00\\x02'\n sect163r2 = b'\\x00\\x03'\n sect193r1 = b'\\x00\\x04'\n sect193r2 = b'\\x00\\x05'\n sect233k1 = b'\\x00\\x06'\n sect233r1 = b'\\x00\\x07'\n sect239k1 = b'\\x00\\x08'\n sect283k1 = b'\\x00\\x09'\n sect283r1 = b'\\x00\\x0a'\n sect409k1 = b'\\x00\\x0b'\n sect409r1 = b'\\x00\\x0c'\n sect571k1 = b'\\x00\\x0d'\n sect571r1 = b'\\x00\\x0e'\n secp160k1 = b'\\x00\\x0f'\n secp160r1 = b'\\x00\\x10'\n secp160r2 = b'\\x00\\x11'\n secp192k1 = b'\\x00\\x12'\n secp192r1 = b'\\x00\\x13'\n secp224k1 = b'\\x00\\x14'\n secp224r1 = b'\\x00\\x15'\n secp256k1 = b'\\x00\\x16'\n secp256r1 = b'\\x00\\x17'\n secp384r1 = b'\\x00\\x18'\n secp521r1 = b'\\x00\\x19'\n brainpoolP256r1 = b'\\x00\\x1a'\n brainpoolP384r1 = b'\\x00\\x1b'\n brainpoolP512r1 = b'\\x00\\x1c'\n x25519 = b'\\x00\\x1d'\n x448 = b'\\x00\\x1e'\n brainpoolP256r1tls13 = b'\\x00\\x1f'\n brainpoolP384r1tls13 = b'\\x00\\x20'\n brainpoolP512r1tls13 = b'\\x00\\x21'\n GC256A = b'\\x00\\x22'\n GC256B = b'\\x00\\x23'\n GC256C = b'\\x00\\x24'\n GC256D = b'\\x00\\x25'\n GC512A = b'\\x00\\x26'\n GC512B = b'\\x00\\x27'\n GC512C = b'\\x00\\x28'\n curveSM2 = b'\\x00\\x29'\n ffdhe2048 = b'\\x01\\x00'\n ffdhe3072 = b'\\x01\\x01'\n ffdhe4096 = b'\\x01\\x02'\n ffdhe6144 = b'\\x01\\x03'\n ffdhe8192 = b'\\x01\\x04'\n arbitrary_explicit_prime_curves = b'\\xff\\x01'\n arbitrary_explicit_char2_curves = b'\\xff\\x02'\n\n # Somewhat common post-quantum groups, not yet standardized:\n X25519Kyber768Draft00 = b'\\x63\\x99', True\n X25519Kyber768Draft00_obsolete = b'\\xfe\\x31', True\n X25519Kyber512Draft00 = b'\\xfe\\x30', True\n SecP256r1Kyber768Draft00 = b'\\x63\\x9a', True\n\n # Long list of unusual post-quantum groups from liboqs:\n # https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md?plain=1#L13\n frodo640aes = b'\\x02\\x00', True\n p256_frodo640aes = b'\\x2F\\x00', True\n x25519_frodo640aes = b'\\x2F\\x80', True\n frodo640shake = b'\\x02\\x01', True\n p256_frodo640shake = b'\\x2F\\x01', True\n x25519_frodo640shake = b'\\x2F\\x81', True\n frodo976aes = b'\\x02\\x02', True\n p384_frodo976aes = b'\\x2F\\x02', True\n x448_frodo976aes = b'\\x2F\\x82', True\n frodo976shake = b'\\x02\\x03', True\n p384_frodo976shake = b'\\x2F\\x03', True\n x448_frodo976shake = b'\\x2F\\x83', True\n frodo1344aes = b'\\x02\\x04', True\n p521_frodo1344aes = 
b'\\x2F\\x04', True\n frodo1344shake = b'\\x02\\x05', True\n p521_frodo1344shake = b'\\x2F\\x05', True\n kyber512 = b'\\x02\\x3A', True\n p256_kyber512 = b'\\x2F\\x3A', True\n x25519_kyber512 = b'\\x2F\\x39', True\n kyber768 = b'\\x02\\x3C', True\n p384_kyber768 = b'\\x2F\\x3C', True\n x448_kyber768 = b'\\x2F\\x90', True\n kyber1024 = b'\\x02\\x3D', True\n p521_kyber1024 = b'\\x2F\\x3D', True\n bikel1 = b'\\x02\\x41', True\n p256_bikel1 = b'\\x2F\\x41', True\n x25519_bikel1 = b'\\x2F\\xAE', True\n bikel3 = b'\\x02\\x42', True\n p384_bikel3 = b'\\x2F\\x42', True\n x448_bikel3 = b'\\x2F\\xAF', True\n bikel5 = b'\\x02\\x43', True\n p521_bikel5 = b'\\x2F\\x43', True\n hqc128 = b'\\x02\\x2C', True\n p256_hqc128 = b'\\x2F\\x2C', True\n x25519_hqc128 = b'\\x2F\\xAC', True\n hqc192 = b'\\x02\\x2D', True\n p384_hqc192 = b'\\x2F\\x2D', True\n x448_hqc192 = b'\\x2F\\xAD', True\n hqc256 = b'\\x02\\x2E', True\n p521_hqc256 = b'\\x2F\\x2E', True\n dilithium2 = b'\\xfe\\xa0', True\n p256_dilithium2 = b'\\xfe\\xa1', True\n rsa3072_dilithium2 = b'\\xfe\\xa2', True\n dilithium3 = b'\\xfe\\xa3', True\n p384_dilithium3 = b'\\xfe\\xa4', True\n dilithium5 = b'\\xfe\\xa5', True\n p521_dilithium5 = b'\\xfe\\xa6', True\n falcon512 = b'\\xfe\\xae', True\n p256_falcon512 = b'\\xfe\\xaf', True\n rsa3072_falcon512 = b'\\xfe\\xb0', True\n falcon1024 = b'\\xfe\\xb1', True\n p521_falcon1024 = b'\\xfe\\xb2', True\n sphincssha2128fsimple = b'\\xfe\\xb3', True\n p256_sphincssha2128fsimple = b'\\xfe\\xb4', True\n rsa3072_sphincssha2128fsimple = b'\\xfe\\xb5', True\n sphincssha2128ssimple = b'\\xfe\\xb6', True\n p256_sphincssha2128ssimple = b'\\xfe\\xb7', True\n rsa3072_sphincssha2128ssimple = b'\\xfe\\xb8', True\n sphincssha2192fsimple = b'\\xfe\\xb9', True\n p384_sphincssha2192fsimple = b'\\xfe\\xba', True\n sphincssha2192ssimple = b'\\xfe\\xbb', True\n p384_sphincssha2192ssimple = b'\\xfe\\xbc', True\n sphincssha2256fsimple = b'\\xfe\\xbd', True\n p521_sphincssha2256fsimple = b'\\xfe\\xbe', True\n sphincssha2256ssimple = b'\\xfe\\xc0', True\n p521_sphincssha2256ssimple = b'\\xfe\\xc1', True\n sphincsshake128fsimple = b'\\xfe\\xc2', True\n p256_sphincsshake128fsimple = b'\\xfe\\xc3', True\n rsa3072_sphincsshake128fsimple = b'\\xfe\\xc4', True\n sphincsshake128ssimple = b'\\xfe\\xc5', True\n p256_sphincsshake128ssimple = b'\\xfe\\xc6', True\n rsa3072_sphincsshake128ssimple = b'\\xfe\\xc7', True\n sphincsshake192fsimple = b'\\xfe\\xc8', True\n p384_sphincsshake192fsimple = b'\\xfe\\xc9', True\n sphincsshake192ssimple = b'\\xfe\\xca', True\n p384_sphincsshake192ssimple = b'\\xfe\\xcb', True\n sphincsshake256fsimple = b'\\xfe\\xcc', True\n p521_sphincsshake256fsimple = b'\\xfe\\xcd', True\n sphincsshake256ssimple = b'\\xfe\\xce', True\n p521_sphincsshake256ssimple = b'\\xfe\\xcf', True" }, { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" }, { "identifier": "CompressionMethod", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CompressionMethod(Enum):\n NULL = b'\\x00'\n DEFLATE = b'\\x01'" } ]
from enum import Enum
from multiprocessing.pool import ThreadPool
from typing import Iterable, Union, List, Optional, Iterator, Callable, Any
from urllib.parse import urlparse
from datetime import datetime, timezone
from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger
from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod
from OpenSSL import SSL, crypto
import socket
import re
import dataclasses
import ssl, select
14,595
# Default number of workers/threads/concurrent connections to use.
DEFAULT_MAX_WORKERS: int = 6
# Default socket connection timeout, in seconds.
DEFAULT_TIMEOUT: float = 2

class DowngradeError(ScanError):
    """ Error for servers that attempt to downgrade beyond supported versions. """
    pass

class ConnectionError(ScanError):
    """ Class for error in resolving or connecting to a server. """
    pass

class ProxyError(ConnectionError):
    """ Class for errors in connecting through a proxy. """
    pass

@dataclasses.dataclass
class ConnectionSettings:
    """ Settings for a connection to a server, including the host, port, and proxy. """
    host: str
    port: int = 443
    proxy: Optional[str] = None
    timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT
    date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0))

def make_socket(settings: ConnectionSettings) -> socket.socket:
    """ Creates and connects a socket to the target server, through the chosen proxy if any. """
    socket_host, socket_port = None, None # To appease the type checker.
    try:
        if not settings.proxy:
            socket_host, socket_port = settings.host, settings.port
            return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)

        if not settings.proxy.startswith('http://'):
            raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy)

        socket_host, socket_port = parse_target(settings.proxy, 80)
        sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)
        sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8'))
        sock_file = sock.makefile('r', newline='\r\n')
        line = sock_file.readline()
        if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line):
            sock_file.close()
            sock.close()
            raise ProxyError("Proxy refused the connection: ", line)
        while True:
            if sock_file.readline() == '\r\n':
                break
        return sock
    except TimeoutError as e:
        raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e
    except socket.gaierror as e:
        raise ConnectionError(f"Could not resolve host {socket_host}") from e
    except socket.error as e:
        raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e

def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello:
    """
    Sends a Client Hello to the server, and returns the parsed ServerHello.
    Raises exceptions for the different alert messages the server can send.
    """
    sock = make_socket(connection_settings)
    sock.send(make_client_hello(client_hello))
    packet_stream = iter(lambda: sock.recv(4096), b'')
    server_hello = parse_server_hello(packet_stream)
    if server_hello.version not in client_hello.protocols:
        # Server picked a protocol we didn't ask for.
        logger.info(f"Server attempted to downgrade protocol to unsupported version {server_hello.version}")
        raise DowngradeError(f"Server attempted to downgrade from {client_hello.protocols} to {server_hello.version}")
    return server_hello

def _iterate_server_option(connection_settings: ConnectionSettings, client_hello: ClientHello, request_option: str, response_option: str, on_response: Callable[[ServerHello], None] = lambda s: None) -> Iterator[Any]:
    """
    Continually sends Client Hello packets to the server, removing the `response_option`
    from the list of options each time, until the server rejects the handshake.
    """
    # We'll be mutating the list of options, so make a copy.
    options_to_test = list(getattr(client_hello, request_option))
    # TODO: figure out how to have mypy accept this line.
    client_hello = dataclasses.replace(client_hello, **{request_option: options_to_test}) # type: ignore
    logger.info(f"Enumerating server {response_option} with {len(options_to_test)} options and protocols {client_hello.protocols}")
    while options_to_test:
        try:
            logger.debug(f"Offering {len(options_to_test)} {response_option} over {client_hello.protocols}: {options_to_test}")
            server_hello = send_hello(connection_settings, client_hello)
            on_response(server_hello)
        except DowngradeError:
            break
# Default number of workers/threads/concurrent connections to use.
DEFAULT_MAX_WORKERS: int = 6
# Default socket connection timeout, in seconds.
DEFAULT_TIMEOUT: float = 2

class DowngradeError(ScanError):
    """ Error for servers that attempt to downgrade beyond supported versions. """
    pass

class ConnectionError(ScanError):
    """ Class for error in resolving or connecting to a server. """
    pass

class ProxyError(ConnectionError):
    """ Class for errors in connecting through a proxy. """
    pass

@dataclasses.dataclass
class ConnectionSettings:
    """ Settings for a connection to a server, including the host, port, and proxy. """
    host: str
    port: int = 443
    proxy: Optional[str] = None
    timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT
    date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0))

def make_socket(settings: ConnectionSettings) -> socket.socket:
    """ Creates and connects a socket to the target server, through the chosen proxy if any. """
    socket_host, socket_port = None, None # To appease the type checker.
    try:
        if not settings.proxy:
            socket_host, socket_port = settings.host, settings.port
            return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)

        if not settings.proxy.startswith('http://'):
            raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy)

        socket_host, socket_port = parse_target(settings.proxy, 80)
        sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)
        sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8'))
        sock_file = sock.makefile('r', newline='\r\n')
        line = sock_file.readline()
        if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line):
            sock_file.close()
            sock.close()
            raise ProxyError("Proxy refused the connection: ", line)
        while True:
            if sock_file.readline() == '\r\n':
                break
        return sock
    except TimeoutError as e:
        raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e
    except socket.gaierror as e:
        raise ConnectionError(f"Could not resolve host {socket_host}") from e
    except socket.error as e:
        raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e

def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello:
    """
    Sends a Client Hello to the server, and returns the parsed ServerHello.
    Raises exceptions for the different alert messages the server can send.
    """
    sock = make_socket(connection_settings)
    sock.send(make_client_hello(client_hello))
    packet_stream = iter(lambda: sock.recv(4096), b'')
    server_hello = parse_server_hello(packet_stream)
    if server_hello.version not in client_hello.protocols:
        # Server picked a protocol we didn't ask for.
        logger.info(f"Server attempted to downgrade protocol to unsupported version {server_hello.version}")
        raise DowngradeError(f"Server attempted to downgrade from {client_hello.protocols} to {server_hello.version}")
    return server_hello

def _iterate_server_option(connection_settings: ConnectionSettings, client_hello: ClientHello, request_option: str, response_option: str, on_response: Callable[[ServerHello], None] = lambda s: None) -> Iterator[Any]:
    """
    Continually sends Client Hello packets to the server, removing the `response_option`
    from the list of options each time, until the server rejects the handshake.
    """
    # We'll be mutating the list of options, so make a copy.
    options_to_test = list(getattr(client_hello, request_option))
    # TODO: figure out how to have mypy accept this line.
    client_hello = dataclasses.replace(client_hello, **{request_option: options_to_test}) # type: ignore
    logger.info(f"Enumerating server {response_option} with {len(options_to_test)} options and protocols {client_hello.protocols}")
    while options_to_test:
        try:
            logger.debug(f"Offering {len(options_to_test)} {response_option} over {client_hello.protocols}: {options_to_test}")
            server_hello = send_hello(connection_settings, client_hello)
            on_response(server_hello)
        except DowngradeError:
            break
except ServerAlertError as error:
0
2023-10-21 02:00:13+00:00
24k
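For context on the record above: cropped_code ends inside _iterate_server_option, and the next_line field gives the ground-truth continuation, `except ServerAlertError as error:`. The minimal Python sketch below shows how that line plausibly fits the rest of the enumeration loop; apart from that one confirmed line, the alert handling and the yield of the accepted option are assumptions inferred from the record's imports (ServerAlertError, AlertDescription) and docstring, not text from the record.

# Hedged sketch only; assumes the names defined or imported in the record
# (DowngradeError, ServerAlertError, AlertDescription). The handshake_failure
# check, the `alert_description` attribute name, and the `yield` are
# hypothetical, not the repository's actual code.
def iterate_option_sketch(send, options):
    # `send(options)` attempts a handshake offering `options` and returns the
    # server's pick, or raises one of the scan errors above.
    while options:
        try:
            picked = send(options)
        except DowngradeError:
            break
        except ServerAlertError as error:  # the record's confirmed next_line
            # Assumption: a handshake_failure alert means the server accepts
            # none of the remaining options, so enumeration is complete.
            if error.alert_description == AlertDescription.handshake_failure:
                break
            raise
        # Stop offering the accepted option and hand it to the caller.
        options.remove(picked)
        yield picked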
zhaojw1998/AccoMontage-3
arrangement_utils.py
[ { "identifier": "split_phrases", "path": "piano_arranger/acc_utils.py", "snippet": "def split_phrases(segmentation):\n \"\"\"Split a phrase label string into individual phrase meta info\"\"\"\n if '\\n' not in segmentation:\n segmentation += '\\n'\n phrases = []\n lengths = []\n current = 0\n while segmentation[current] != '\\n':\n if segmentation[current].isalpha():\n j = 1\n while not (segmentation[current + j].isalpha() or segmentation[current + j] == '\\n'):\n j += 1\n phrases.append(segmentation[current])\n lengths.append(int(segmentation[current+1: current+j]))\n current += j\n return [(phrases[i], lengths[i], sum(lengths[:i])) for i in range(len(phrases))] " }, { "identifier": "DisentangleVAE", "path": "piano_arranger/models/Poly_Dis.py", "snippet": "class DisentangleVAE(PytorchModel):\n\n def __init__(self, name, device, chd_encoder, rhy_encoder, decoder,\n chd_decoder):\n super(DisentangleVAE, self).__init__(name, device)\n self.chd_encoder = chd_encoder\n self.rhy_encoder = rhy_encoder\n self.decoder = decoder\n self.num_step = self.decoder.num_step\n self.chd_decoder = chd_decoder\n\n def confuse_prmat(self, pr_mat):\n non_zero_ent = torch.nonzero(pr_mat.long())\n eps = torch.randint(0, 2, (non_zero_ent.size(0),))\n eps = ((2 * eps) - 1).long()\n confuse_ent = torch.clamp(non_zero_ent[:, 2] + eps, min=0, max=127)\n pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], confuse_ent] = \\\n pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], non_zero_ent[:, 2]]\n return pr_mat\n\n def get_chroma(self, pr_mat):\n bs = pr_mat.size(0)\n pad = torch.zeros(bs, 32, 4).to(self.device)\n pr_mat = torch.cat([pr_mat, pad], dim=-1)\n c = pr_mat.view(bs, 32, -1, 12).contiguous()\n c = c.sum(dim=-2) # (bs, 32, 12)\n c = c.view(bs, 8, 4, 12)\n c = c.sum(dim=-2).float()\n c = torch.log(c + 1)\n return c.to(self.device)\n\n def run(self, x, c, pr_mat, tfr1, tfr2, tfr3, confuse=True):\n embedded_x, lengths = self.decoder.emb_x(x)\n # cc = self.get_chroma(pr_mat)\n dist_chd = self.chd_encoder(c)\n # pr_mat = self.confuse_prmat(pr_mat)\n dist_rhy = self.rhy_encoder(pr_mat)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, False, embedded_x,\n lengths, tfr1, tfr2)\n recon_root, recon_chroma, recon_bass = self.chd_decoder(z_chd, False,\n tfr3, c)\n return pitch_outs, dur_outs, dist_chd, dist_rhy, recon_root, \\\n recon_chroma, recon_bass\n\n def loss_function(self, x, c, recon_pitch, recon_dur, dist_chd,\n dist_rhy, recon_root, recon_chroma, recon_bass,\n beta, weights, weighted_dur=False):\n recon_loss, pl, dl = self.decoder.recon_loss(x, recon_pitch, recon_dur,\n weights, weighted_dur)\n kl_loss, kl_chd, kl_rhy = self.kl_loss(dist_chd, dist_rhy)\n chord_loss, root, chroma, bass = self.chord_loss(c, recon_root,\n recon_chroma,\n recon_bass)\n loss = recon_loss + beta * kl_loss + chord_loss\n return loss, recon_loss, pl, dl, kl_loss, kl_chd, kl_rhy, chord_loss, \\\n root, chroma, bass\n\n def chord_loss(self, c, recon_root, recon_chroma, recon_bass):\n loss_fun = nn.CrossEntropyLoss()\n root = c[:, :, 0: 12].max(-1)[-1].view(-1).contiguous()\n chroma = c[:, :, 12: 24].long().view(-1).contiguous()\n bass = c[:, :, 24:].max(-1)[-1].view(-1).contiguous()\n\n recon_root = recon_root.view(-1, 12).contiguous()\n recon_chroma = recon_chroma.view(-1, 2).contiguous()\n recon_bass = recon_bass.view(-1, 12).contiguous()\n root_loss = loss_fun(recon_root, root)\n chroma_loss = loss_fun(recon_chroma, chroma)\n bass_loss = 
loss_fun(recon_bass, bass)\n chord_loss = root_loss + chroma_loss + bass_loss\n return chord_loss, root_loss, chroma_loss, bass_loss\n\n def kl_loss(self, *dists):\n # kl = kl_with_normal(dists[0])\n kl_chd = kl_with_normal(dists[0])\n kl_rhy = kl_with_normal(dists[1])\n kl_loss = kl_chd + kl_rhy\n return kl_loss, kl_chd, kl_rhy\n\n def loss(self, x, c, pr_mat, dt_x, tfr1=0., tfr2=0., tfr3=0., beta=0.1, weights=(1, 0.5)):\n #print(pr_mat.shape, dt_x.shape)\n outputs = self.run(x, c, pr_mat, tfr1, tfr2, tfr3)\n loss = self.loss_function(x, c, *outputs, beta, weights)\n return loss\n\n # def inference(self, c, pr_mat):\n # self.eval()\n # with torch.no_grad():\n # dist_chd = self.chd_encoder(c)\n # # pr_mat = self.confuse_prmat(pr_mat)\n # dist_rhy = self.rhy_encoder(pr_mat)\n # z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n # dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n # pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n # None, 0., 0.)\n # est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n # return est_x\n #\n # def swap(self, c1, c2, pr_mat1, pr_mat2, fix_rhy, fix_chd):\n # pr_mat = pr_mat1 if fix_rhy else pr_mat2\n # c = c1 if fix_chd else c2\n # est_x = self.inference(c, pr_mat)\n # return est_x\n\n def inference_encode(self, pr_mat, c):\n self.eval()\n with torch.no_grad():\n dist_chd = self.chd_encoder(c)\n dist_rhy = self.rhy_encoder(pr_mat)\n return dist_chd, dist_rhy\n\n def inference_decode(self, z_chd, z_rhy):\n self.eval()\n with torch.no_grad():\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n None, 0., 0.)\n est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n return est_x\n\n def inference(self, pr_mat, c, sample):\n self.eval()\n with torch.no_grad():\n dist_chd = self.chd_encoder(c)\n dist_rhy = self.rhy_encoder(pr_mat)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n None, 0., 0.)\n est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n return est_x\n\n def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):\n pr_mat = pr_mat1 if fix_rhy else pr_mat2\n c = c1 if fix_chd else c2\n est_x = self.inference(pr_mat, c, sample=False)\n return est_x\n\n def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,\n sample_txt=True):\n if scale is None and sample_chd and sample_txt:\n est_x = self.inference(pr_mat, c, sample=True)\n else:\n dist_chd, dist_rhy = self.inference_encode(pr_mat, c)\n if scale is not None:\n mean_chd = dist_chd.mean\n mean_rhy = dist_rhy.mean\n # std_chd = torch.ones_like(dist_chd.mean) * scale\n # std_rhy = torch.ones_like(dist_rhy.mean) * scale\n std_chd = dist_chd.scale * scale\n std_rhy = dist_rhy.scale * scale\n dist_rhy = Normal(mean_rhy, std_rhy)\n dist_chd = Normal(mean_chd, std_chd)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n if not sample_chd:\n z_chd = dist_chd.mean\n if not sample_txt:\n z_rhy = dist_rhy.mean\n est_x = self.inference_decode(z_chd, z_rhy)\n return est_x\n\n def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,\n scale=1.):\n dist_chd, dist_rhy = self.inference_encode(x, c)\n mean = torch.zeros_like(dist_rhy.mean)\n loc = torch.ones_like(dist_rhy.mean) * scale\n if sample_chd:\n dist_chd = Normal(mean, loc)\n if sample_rhy:\n dist_rhy = Normal(mean, loc)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n return self.inference_decode(z_chd, 
z_rhy)\n\n def gt_sample(self, x):\n out = x[:, :, 1:].numpy()\n return out\n\n def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,\n interp_rhy=False, int_count=10):\n dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)\n dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)\n [z_chd1, z_rhy1, z_chd2, z_rhy2] = \\\n get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],\n False)\n if interp_chd:\n z_chds = self.interp_z(z_chd1, z_chd2, int_count)\n else:\n z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)\n if interp_rhy:\n z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)\n else:\n z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)\n bs = z_chds.size(0)\n z_chds = z_chds.view(bs * int_count, -1).contiguous()\n z_rhys = z_rhys.view(bs * int_count, -1).contiguous()\n estxs = self.inference_decode(z_chds, z_rhys)\n return estxs.reshape((bs, int_count, 32, 15, -1))\n\n def interp_z(self, z1, z2, int_count=10):\n z1 = z1.numpy()\n z2 = z2.numpy()\n zs = torch.stack([self.interp_path(zz1, zz2, int_count)\n for zz1, zz2 in zip(z1, z2)], dim=0)\n return zs\n\n def interp_path(self, z1, z2, interpolation_count=10):\n result_shape = z1.shape\n z1 = z1.reshape(-1)\n z2 = z2.reshape(-1)\n\n def slerp2(p0, p1, t):\n omega = np.arccos(\n np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))\n so = np.sin(omega)\n return np.sin((1.0 - t) * omega)[:, None] / so * p0[\n None] + np.sin(\n t * omega)[:, None] / so * p1[None]\n\n percentages = np.linspace(0.0, 1.0, interpolation_count)\n\n normalized_z1 = z1 / np.linalg.norm(z1)\n normalized_z2 = z2 / np.linalg.norm(z2)\n dirs = slerp2(normalized_z1, normalized_z2, percentages)\n length = np.linspace(np.log(np.linalg.norm(z1)),\n np.log(np.linalg.norm(z2)),\n interpolation_count)\n out = (dirs * np.exp(length[:, None])).reshape(\n [interpolation_count] + list(result_shape))\n # out = np.array([(1 - t) * z1 + t * z2 for t in percentages])\n return torch.from_numpy(out).to(self.device).float()\n\n @staticmethod\n def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):\n name = 'disvae'\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available()\n else 'cpu')\n # chd_encoder = RnnEncoder(36, 1024, 256)\n chd_encoder = RnnEncoder(36, 1024, chd_size)\n # rhy_encoder = TextureEncoder(256, 1024, 256)\n rhy_encoder = TextureEncoder(256, 1024, txt_size, num_channel)\n # pt_encoder = PtvaeEncoder(device=device, z_size=152)\n # chd_decoder = RnnDecoder(z_dim=256)\n chd_decoder = RnnDecoder(z_dim=chd_size)\n # pt_decoder = PtvaeDecoder(note_embedding=None,\n # dec_dur_hid_size=64, z_size=512)\n pt_decoder = PtvaeDecoder(note_embedding=None,\n dec_dur_hid_size=64,\n z_size=chd_size + txt_size)\n\n model = DisentangleVAE(name, device, chd_encoder,\n rhy_encoder, pt_decoder, chd_decoder)\n return model" }, { "identifier": "find_by_length", "path": "piano_arranger/AccoMontage.py", "snippet": "def find_by_length(melody_data, acc_data, chord_data, velocity_data, cc_data, length):\n \"\"\"Search from POP909 phrase data for a certain phrase length.\"\"\"\n melody_record = []\n acc_record = []\n chord_record = []\n velocity_record = []\n cc_record = []\n song_reference = []\n for song_idx in range(acc_data.shape[0]):\n for phrase_idx in range(len(acc_data[song_idx])):\n melody = melody_data[song_idx][phrase_idx]\n if not melody.shape[0] == length * 16:\n continue\n if np.sum(melody[:, :128]) <= 2:\n continue\n melody_record.append(melody)\n acc = acc_data[song_idx][phrase_idx]\n acc_record.append(acc)\n 
chord = chord_data[song_idx][phrase_idx]\n chord_record.append(chord)\n velocity = velocity_data[song_idx][phrase_idx]\n velocity_record.append(velocity)\n cc = cc_data[song_idx][phrase_idx]\n cc_record.append(cc)\n song_reference.append((song_idx, phrase_idx))\n return np.array(melody_record), np.array(acc_record), np.array(chord_record), np.array(velocity_record), np.array(cc_record), song_reference" }, { "identifier": "dp_search", "path": "piano_arranger/AccoMontage.py", "snippet": "def dp_search(query_phrases, seg_query, acc_pool, edge_weights, texture_filter=None, filter_id=None, spotlights=None, randomness=0):\n \"\"\"Search for texture donors based on dynamic programming.\n * query_phrases: lead sheet in segmented phrases. Shape of each phrase: (T, 142), quantized at 1/4-beat level. This format is defined in R. Yang et al., \"Deep music analogy via latent representation disentanglement,\" ISMIR 2019.\n * seg_query: phrase annotation for the lead sheet. Format of each phrase: (label, length, start). For example, seg_query=[('A', 8, 0), ('A', 8, 8), ('B', 4, 16)].\n * acc_pool: search space for piano texture donors.\n * edge_weights: pre-computed transition scores for texture donor i to i+1.\n * texture_filter: filter on voice number (VN) and rhythmic density (RD).\n * filter_id: specified VN abd RD to filter for the first phrase.\n * spotlights: specified a preference for certain songs and/or artists for the search process.\n * randomness: degree of randomness tobe introduced to the search process.\n \"\"\"\n seg_query = [item[0] + str(item[1]) for item in seg_query] #['A8', 'A8', 'B8', 'B8']\n #Searching for phrase 1\n query_length = [query_phrases[i].shape[0]//16 for i in range(len(query_phrases))]\n mel, acc, chord, _, _, song_ref = acc_pool[query_length[0]]\n mel_set = mel\n rhy_set = np.concatenate((np.sum(mel_set[:, :, :128], axis=-1, keepdims=True), mel_set[:, :, 128: 130]), axis=-1)\n query_rhy = np.concatenate((np.sum(query_phrases[0][:, : 128], axis=-1, keepdims=True), query_phrases[0][:, 128: 130]), axis=-1)[np.newaxis, :, :]\n rhythm_result = cosine_rhy(query_rhy+1e-5, rhy_set+1e-5)\n\n chord_set = chord\n chord_set, num_total, shift_const = chord_shift(chord_set)\n chord_set_TIV = computeTIV(chord_set)\n query_chord = query_phrases[0][:, 130:][::4]\n query_chord_TIV = computeTIV(query_chord)[np.newaxis, :, :]\n chord_score, arg_chord = cosine(query_chord_TIV, chord_set_TIV)\n\n score = .5*rhythm_result + .5*chord_score\n score += randomness * np.random.normal(0, 1, size=len(score)) #to introduce some randomness\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx: \n score[ref_idx] += 1\n if filter_id is not None:\n mask = texture_filter[query_length[0]][0][filter_id[0]] * texture_filter[query_length[0]][1][filter_id[1]] - 1\n score += mask\n\n path = [[(i, score[i])] for i in range(acc.shape[0])]\n shift = [[shift_const[i]] for i in arg_chord]\n melody_record = np.argmax(mel_set, axis=-1)\n record = []\n\n #Searching for phrase 2, 3, ...\n for i in tqdm(range(1, len(query_length))):\n mel, acc, chord, _, _, song_ref = acc_pool[query_length[i]]\n weight_key = f\"l_{str(query_length[i-1]).zfill(2)}_{str(query_length[i]).zfill(2)}\"\n contras_result = edge_weights[weight_key]\n if query_length[i-1] == query_length[i]:\n for j in range(contras_result.shape[0]):\n contras_result[j, j] = -1 #the ith phrase does not transition to itself at i+1\n for k in range(j-1, -1, -1):\n if song_ref[k][0] 
!= song_ref[j][0]:\n break\n contras_result[j, k] = -1 #ith phrase does not transition to its ancestors in the same song.\n if i > 1:\n contras_result = contras_result[[item[-1][1] for item in record]]\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx:\n contras_result[:, ref_idx] += 1\n mel_set = mel\n rhy_set = np.concatenate((np.sum(mel_set[:, :, :128], axis=-1, keepdims=True), mel_set[:, :, 128: 130]), axis=-1)\n query_rhy = np.concatenate((np.sum(query_phrases[i][:, : 128], axis=-1, keepdims=True), query_phrases[i][:, 128: 130]), axis=-1)[np.newaxis, :, :]\n rhythm_result = cosine_rhy(query_rhy, rhy_set)\n chord_set = chord\n chord_set, num_total, shift_const = chord_shift(chord_set)\n chord_set_TIV = computeTIV(chord_set)\n query_chord = query_phrases[i][:, 130:][::4]\n query_chord_TIV = computeTIV(query_chord)[np.newaxis, :, :]\n chord_score, arg_chord = cosine(query_chord_TIV, chord_set_TIV)\n sim_this_layer = .5*rhythm_result + .5*chord_score\n sim_this_layer += randomness * np.random.normal(0, 1, size=len(sim_this_layer))\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx: \n sim_this_layer[ref_idx] += 1\n score_this_layer = .7*contras_result + .3*np.tile(sim_this_layer[np.newaxis, :], (contras_result.shape[0], 1)) + np.tile(score[:, np.newaxis], (1, contras_result.shape[1]))\n melody_flat = np.argmax(mel_set, axis=-1)\n if seg_query[i] == seg_query[i-1]:\n melody_pre = melody_record\n matrix = np.matmul(melody_pre, np.transpose(melody_flat, (1, 0))) / (np.linalg.norm(melody_pre, axis=-1)[:, np.newaxis]*(np.linalg.norm(np.transpose(melody_flat, (1, 0)), axis=0))[np.newaxis, :])\n if i == 1:\n for k in range(matrix.shape[1]):\n matrix[k, :k] = -1\n else:\n for k in range(len(record)):\n matrix[k, :record[k][-1][1]] = -1\n matrix = (matrix > 0.99) * 1.\n score_this_layer += matrix\n topk = 1\n args = np.argsort(score_this_layer, axis=0)[::-1, :][:topk, :]\n record = []\n for j in range(args.shape[-1]):\n for k in range(args.shape[0]):\n record.append((score_this_layer[args[k, j], j], (args[k, j], j)))\n shift_this_layer = [[shift_const[k]] for k in arg_chord]\n new_path = [path[item[-1][0]] + [(item[-1][1], sim_this_layer[item[-1][1]])] for item in record]\n new_shift = [shift[item[-1][0]] + shift_this_layer[item[-1][1]] for item in record]\n melody_record = melody_flat[[item[-1][1] for item in record]]\n path = new_path\n shift = new_shift\n score = np.array([item[0] for item in record])\n\n arg = score.argsort()[::-1]\n return [path[arg[i]] for i in range(topk)], [shift[arg[i]] for i in range(topk)]" }, { "identifier": "re_harmonization", "path": "piano_arranger/AccoMontage.py", "snippet": "def re_harmonization(lead_sheet, chord_table, query_phrases, indices, shifts, acc_pool, model, get_est=True, tempo=120):\n \"\"\"Re-harmonize the accompaniment texture donors and save in MIDI.\n * lead_sheet: the conditional lead sheet. Its melody track will be taken. Shape: (T, 142), quantized at 1-beat level. This format is defined in R. Yang et al., \"Deep music analogy via latent representation disentanglement,\" ISMIR 2019.\n * chord_table: the conditional chord progression from the lead sheet. Shape: (T', 36), quantized at 1-beat level. This format is defined in Z. 
Wang et al., \"Learning interpretable representation for controllable polyphonic music generation,\" ISMIR 2020.\n * seg_query: phrase annotation for the lead sheet. Format of each phrase: (label, length, start). For example, seg_query=[('A', 8, 0), ('A', 8, 8), ('B', 4, 16)].\n * indices: the indices of selected texture donor phrases in the acc_pool.\n * shifts: pitch transposition of each selected phrase.\n * acc_pool: search space for piano texture donors.\n * tempo: the tempo to render the piece.\n \"\"\"\n acc_roll = np.empty((0, 128))\n vel_roll = []\n phrase_mean_vel = []\n cc_roll = np.empty((0, 128))\n #retrive texture donor data of the corrresponding indices from the acc_pool\n for i, idx in enumerate(indices):\n length = query_phrases[i][-2]\n shift = shifts[i]\n # notes\n acc_matrix = np.roll(acc_pool[length][1][idx[0]], shift, axis=-1)\n acc_roll = np.concatenate((acc_roll, acc_matrix), axis=0)\n #MIDI velocity\n vel_matrix = np.roll(acc_pool[length][3][idx[0]], shift, axis=-1)\n phrase_mean_vel.append(np.mean(np.ma.masked_equal(vel_matrix, value=0)))\n vel_roll.append(vel_matrix)\n #MIDI control messages (mainly for pedals)\n cc_matrix = acc_pool[length][4][idx[0]]\n cc_roll = np.concatenate((cc_roll, cc_matrix), axis=0)\n # normalize the scale of velocity across different retrieved phrases\n global_mean_vel = np.mean(np.ma.masked_equal(np.concatenate(vel_roll, axis=0), value=0))\n for i in range(len(vel_roll)):\n vel_roll[i][vel_roll[i] > 0] += (global_mean_vel - phrase_mean_vel[i])\n vel_roll = np.concatenate(vel_roll, axis=0)\n #re-harmonization\n if len(acc_roll) % 32 != 0:\n pad_len = (len(acc_roll)//32+1)*32 - len(acc_roll)\n acc_roll = np.pad(acc_roll, ((0, pad_len), (0, 0)))\n vel_roll = np.pad(vel_roll, ((0, pad_len), (0, 0)))\n cc_roll = np.pad(cc_roll, ((0, pad_len), (0, 0)), mode='constant', constant_values=-1)\n chord_table = np.pad(chord_table, ((0, pad_len//4), (0, 0)))\n chord_table[-pad_len:, 0] = -1\n chord_table[-pad_len:, -1] = -1\n acc_roll = acc_roll.reshape(-1, 32, 128)\n chord_table = chord_table.reshape(-1, 8, 36)\n acc_roll = torch.from_numpy(acc_roll).float().cuda()\n acc_roll = torch.clip(acc_roll, min=0, max=31)\n gt_chord = torch.from_numpy(chord_table).float().cuda()\n est_x = model.inference(acc_roll, gt_chord, sample=False)\n acc_roll = cvt.grid2pr(est_x.reshape(-1, 15, 6))\n #interpolate MIDI velocity\n adapt_vel_roll = np.zeros(vel_roll.shape)\n masked_dyn_matrix = np.ma.masked_equal(vel_roll, value=0)\n mean = np.mean(masked_dyn_matrix, axis=-1)\n onsets = np.nonzero(mean.data)\n dynamic = mean.data[onsets]\n onsets = onsets[0].tolist()\n dynamic = dynamic.tolist()\n if not 0 in onsets:\n onsets = [0] + onsets\n dynamic = [dynamic[0]] + dynamic\n if not len(vel_roll)-1 in onsets:\n onsets = onsets + [len(vel_roll)-1]\n dynamic = dynamic + [dynamic[-1]]\n dyn_curve = interp1d(onsets, dynamic)\n for t, p in zip(*np.nonzero(acc_roll)):\n adapt_vel_roll[t, p] = dyn_curve(t)\n adapt_vel_roll = np.clip(adapt_vel_roll, a_min=0, a_max=127)\n #reconstruct MIDI\n accompaniment = np.stack([acc_roll, adapt_vel_roll, cc_roll], axis=-1)[np.newaxis, :, :, :]\n midi_recon = cvt.matrix2midi_with_dynamics(accompaniment, programs=[0], init_tempo=tempo)\n melody_track = cvt.melody_matrix2data(melody_matrix=lead_sheet[:, :130], tempo=tempo)\n midi_recon.instruments = [melody_track] + midi_recon.instruments\n if get_est:\n return midi_recon, est_x\n else:\n return midi_recon" }, { "identifier": "get_texture_filter", "path": "piano_arranger/AccoMontage.py", 
"snippet": "def get_texture_filter(acc_pool):\n \"\"\"Divide accompaniment texture donors into fifths in terms of voice number (VN) and rhythmic density (RD).\"\"\"\n texture_filter = {}\n for key in acc_pool:\n acc_track = acc_pool[key][1]\n # CALCULATE HORIZONTAL DENSITY (rhythmic density)\n onset_positions = (np.sum(acc_track, axis=-1) > 0) * 1.\n HD = np.sum(onset_positions, axis=-1) / acc_track.shape[1] #(N)\n # CALCULATE VERTICAL DENSITY (voice number)\n beat_positions = acc_track[:, ::4, :]\n downbeat_positions = acc_track[:, ::16, :]\n upbeat_positions = acc_track[:, 2::4, :]\n\n simu_notes_on_beats = np.sum((beat_positions > 0) * 1., axis=-1) #N*T\n simu_notes_on_downbeats = np.sum((downbeat_positions > 0) * 1., axis=-1)\n simu_notes_on_upbeats = np.sum((upbeat_positions > 0) * 1., axis=-1)\n\n VD_beat = np.sum(simu_notes_on_beats, axis=-1) / (np.sum((simu_notes_on_beats > 0) * 1., axis=-1) + 1e-10)\n VD_upbeat = np.sum(simu_notes_on_upbeats, axis=-1) / (np.sum((simu_notes_on_upbeats > 0) * 1., axis=-1) + 1e-10)\n\n VD = np.max(np.stack((VD_beat, VD_upbeat), axis=-1), axis=-1)\n #get five-equal-divident-points of HD\n dst = np.sort(HD)\n HD_anchors = [dst[len(dst) // 5], dst[len(dst) // 5 * 2], dst[len(dst) // 5 * 3], dst[len(dst) // 5 * 4]]\n HD_Bins = [\n HD < HD_anchors[0],\n (HD >= HD_anchors[0]) * (HD < HD_anchors[1]),\n (HD >= HD_anchors[1]) * (HD < HD_anchors[2]),\n (HD >= HD_anchors[2]) * (HD < HD_anchors[3]),\n HD >= HD_anchors[3]\n ]\n #get five-equal-divident-points of VD\n dst = np.sort(VD)\n VD_anchors = [dst[len(dst) // 5], dst[len(dst) // 5 * 2], dst[len(dst) // 5 * 3], dst[len(dst) // 5 * 4]]\n VD_Bins = [\n VD < VD_anchors[0],\n (VD >= VD_anchors[0]) * (VD < VD_anchors[1]),\n (VD >= VD_anchors[1]) * (VD < VD_anchors[2]),\n (VD >= VD_anchors[2]) * (VD < VD_anchors[3]),\n VD >= VD_anchors[3]\n ]\n texture_filter[key] = (HD_Bins, VD_Bins) #((5, N), (5, N))\n return texture_filter" }, { "identifier": "ref_spotlight", "path": "piano_arranger/AccoMontage.py", "snippet": "def ref_spotlight(ref_name_list, reference_check):\n \"\"\"convert spotlight song/artist names into the indices of corresponding pieces in the dataset.\"\"\"\n if ref_name_list is None:\n return None\n check_idx = []\n #POP909 song_id\n for name in ref_name_list:\n line = reference_check[reference_check.song_id == name]\n if not line.empty:\n check_idx.append(line.index)#read by pd, neglect first row, index starts from 0.\n #song name\n for name in ref_name_list:\n line = reference_check[reference_check.name == name]\n if not line.empty:\n check_idx.append(line.index)#read by pd, neglect first row, index starts from 0.\n #artist name\n for name in ref_name_list:\n line = reference_check[reference_check.artist == name]\n if not line.empty:\n check_idx += list(line.index)#read by pd, neglect first row, index starts from 0\n return check_idx" }, { "identifier": "Slakh2100_Pop909_Dataset", "path": "orchestrator/QA_dataset.py", "snippet": "class Slakh2100_Pop909_Dataset(Dataset):\n def __init__(self, slakh_dir, pop909_dir, sample_len=SAMPLE_LEN, hop_len=BAR_HOP_LEN, debug_mode=False, split='train', mode='train', with_dynamics=False, merge_pop909=0):\n super(Slakh2100_Pop909_Dataset, self).__init__()\n self.split = split\n self.mode = mode\n self.debug_mode = debug_mode\n\n self.with_dynamics = with_dynamics\n self.merge_pop909 = merge_pop909\n\n self.memory = dict({'tracks': [],\n 'programs': [],\n 'dynamics': [],\n 'dir': []\n })\n self.anchor_list = []\n self.sample_len = sample_len\n \n if slakh_dir is 
not None:\n print('loading Slakh2100 Dataset ...')\n self.load_data(slakh_dir, sample_len, hop_len)\n if pop909_dir is not None:\n print('loading Pop909 Dataset ...')\n self.load_data(pop909_dir, sample_len, hop_len)\n\n def __len__(self):\n return len(self.anchor_list)\n \n def __getitem__(self, idx):\n song_id, start = self.anchor_list[idx]\n\n if self.mode == 'train': \n tracks_sample = self.memory['tracks'][song_id][:, start: start+self.sample_len]\n program_sample = self.memory['programs'][song_id]\n #delete empty tracks if any\n non_empty = np.nonzero(np.sum(tracks_sample, axis=(1, 2)))[0]\n tracks_sample = tracks_sample[non_empty]\n program_sample = program_sample[non_empty]\n\n elif (self.mode == 'test') or (self.mode == 'inference'): \n tracks_sample = self.memory['tracks'][song_id][:, start:]\n program_sample = self.memory['programs'][song_id]\n\n if ((len(program_sample) <= 3) and (program_sample == 0).all()):\n #merge pop909 into a single piano track at certain probability\n if np.random.rand() < self.merge_pop909: \n tracks_sample = np.max(tracks_sample, axis=0, keepdims=True)\n program_sample = np.array([0])\n\n if self.with_dynamics:\n dynamics = self.memory['dynamics'][song_id][:, start: start+self.sample_len]\n else: \n dynamics = None\n \n return tracks_sample, program_sample, dynamics, self.memory['dir'][song_id]\n\n\n def slakh_program_mapping(self, programs):\n return np.array([EMBED_PROGRAM_MAPPING[SLAKH_PROGRAM_MAPPING[program]] for program in programs])\n\n\n def load_data(self, data_dir, sample_len, hop_len):\n song_list = [os.path.join(data_dir, self.split, item) for item in os.listdir(os.path.join(data_dir, self.split))]\n if self.debug_mode:\n song_list = song_list[: 10]\n for song_dir in tqdm(song_list):\n song_data = np.load(song_dir)\n tracks = song_data['tracks'] #(n_track, time, 128)\n if 'programs' in song_data:\n programs = song_data['programs'] #(n_track, )\n else:\n programs = np.array([0]*len(tracks))\n\n center_pitch = compute_center_pitch(tracks)\n pitch_sort = np.argsort(center_pitch)[::-1]\n tracks = tracks[pitch_sort]\n programs = programs[pitch_sort]\n\n \"\"\"clipping\"\"\" \n if self.mode == 'train':\n if self.split =='validation':\n # during model training, no overlapping for validation set\n for i in range(0, tracks.shape[1], sample_len):\n if i + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), i)) #(song_id, start, total_length)\n else:\n # otherwise, hop size is 1-bar\n downbeats = np.nonzero(song_data['db_indicator'])[0]\n for i in range(0, len(downbeats), hop_len):\n if downbeats[i] + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), downbeats[i])) #(song_id, start)\n\n elif (self.mode == 'test') or (self.mode == 'inference'):\n start = np.nonzero(song_data['db_indicator'])[0][0]\n end = start + (tracks.shape[1] - start) // sample_len * sample_len\n if end < tracks.shape[1]:\n pad_len = end + sample_len - tracks.shape[1]\n end += sample_len\n tracks = np.pad(tracks, ((0, 0), (0, pad_len), (0, 0)), mode='constant', constant_values=(0,))\n tracks = tracks[:, start: end]\n self.anchor_list.append((len(self.memory['tracks']), start))\n\n self.memory['tracks'].append(tracks)\n self.memory['programs'].append(self.slakh_program_mapping(programs))\n self.memory['dir'].append(song_dir)\n\n if self.with_dynamics:\n self.memory['dynamics'].append(song_data['dynamics'])" }, { "identifier": "collate_fn", "path": "orchestrator/QA_dataset.py", "snippet": "def 
collate_fn(batch, device, pitch_shift=True):\n #print(batch)\n max_tracks = max([max(len(item[0]), 1) for item in batch])\n\n tracks = [] \n mixture = []\n instrument = []\n aux_feature = []\n mask = [] #track-wise pad mask\n function = []\n\n if pitch_shift:\n aug_p = AUG_P / AUG_P.sum()\n aug_shift = np.random.choice(np.arange(-6, 6), 1, p=aug_p)[0]\n else:\n aug_shift = 0\n\n for pr, programs, _, _ in batch:\n pr = pr_mat_pitch_shift(pr, aug_shift)\n aux, _, func = compute_pr_feat(pr)\n mask.append([0]*len(pr) + [1]*(max_tracks-len(pr)))\n\n pr = np.pad(pr, ((0, max_tracks-len(pr)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n programs = np.pad(programs, (0, max_tracks-len(programs)), mode='constant', constant_values=(NUM_INSTR_CLASS,))\n aux = np.pad(aux, ((0, max_tracks-len(aux)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n func = np.pad(func, ((0, max_tracks-len(func)), (0, 0)), mode='constant', constant_values=(0,))\n\n mix = pr2grid(np.max(pr, axis=0), max_note_count=32)\n grid = np.array([pr2grid(matrix) for matrix in pr])\n\n tracks.append(grid)\n mixture.append(mix)\n instrument.append(programs)\n aux_feature.append(aux)\n function.append(func)\n\n return torch.from_numpy(np.array(mixture)).long().to(device), \\\n torch.from_numpy(np.array(instrument)).to(device), \\\n torch.from_numpy(np.array(function)).float().to(device),\\\n torch.from_numpy(np.array(tracks)).long().to(device), \\\n torch.from_numpy(np.array(aux_feature)).float().to(device), \\\n torch.BoolTensor(mask).to(device)" }, { "identifier": "compute_pr_feat", "path": "orchestrator/QA_dataset.py", "snippet": "def compute_pr_feat(pr):\n #pr: (track, time, 128)\n onset = (np.sum(pr, axis=-1) > 0) * 1. #(track, time)\n rhy_intensity = np.clip(np.sum((pr > 0) * 1., axis=-1) / 14, a_min=None, a_max=1) #(track, time)\n\n weight = np.sum(pr, axis=-1)\n weight[weight==0] = 1\n pitch_center = np.sum(np.arange(0, 128)[np.newaxis, np.newaxis, :] * pr, axis=-1) / weight / 128\n\n feature = np.stack((onset, rhy_intensity, pitch_center), axis=-1)\n\n func_pitch = np.sum((pr > 0) * 1., axis=-2) / 32\n\n func_time = rhy_intensity.copy()\n \n return feature, func_pitch, func_time" }, { "identifier": "EMBED_PROGRAM_MAPPING", "path": "orchestrator/QA_dataset.py", "snippet": "EMBED_PROGRAM_MAPPING = dict({\n 0: 0, 4: 1, 8: 2, 16: 3, 24: 4, 26: 5, 29: 6, 32: 7,\\\n 33: 8, 40: 9, 41: 10, 42: 11, 43: 12, 46: 13, 47: 14, 48: 15,\\\n 50: 16, 52: 17, 55: 18, 56: 19, 57: 20, 58: 21, 60: 22, 61: 23, \n 64: 24, 66: 25, 67: 26, 68: 27, 69: 28, 70: 29, 71: 30, 72: 31,\\\n 80: 32, 88: 33})" }, { "identifier": "Prior", "path": "orchestrator/prior_model.py", "snippet": "class Prior(nn.Module):\n def __init__(self, mixture_encoder=None,\n function_encoder=None,\n context_enc_layer=12, \n function_dec_layer=12, \n d_model=256, \n nhead=8, \n dim_feedforward=1024, \n dropout=.1, \n function_resolution=8,\n inference=False,\n QA_model=None,\n DEVICE='cuda:0'):\n super(Prior, self).__init__()\n\n # embeddings\n self.func_embedding = nn.Embedding(num_embeddings=NUM_TIME_CODE+1, embedding_dim=d_model, padding_idx=NUM_TIME_CODE)\n self.prog_embedding = nn.Embedding(num_embeddings=NUM_INSTR_CLASS+1, embedding_dim=d_model, padding_idx=NUM_INSTR_CLASS)\n self.total_len_embedding = nn.Embedding(num_embeddings=len(TOTAL_LEN_BIN)+1, embedding_dim=d_model, padding_idx=len(TOTAL_LEN_BIN))\n self.abs_pos_embedding = nn.Embedding(num_embeddings=len(ABS_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(ABS_POS_BIN))\n self.rel_pos_embedding 
= nn.Embedding(num_embeddings=len(REL_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(REL_POS_BIN))\n\n self.start_embedding = nn.Parameter(torch.empty(NUM_INSTR_CLASS+1, d_model))\n nn.init.normal_(self.start_embedding)\n with torch.no_grad():\n self.start_embedding[NUM_INSTR_CLASS].fill_(0)\n\n #pre-trained encoders\n if not inference:\n self.mixture_encoder = mixture_encoder\n for param in self.mixture_encoder.parameters():\n param.requires_grad = False\n self.function_encoder = function_encoder\n for param in self.function_encoder.parameters():\n param.requires_grad = False\n else:\n self.QA_model = QA_model\n self.mixture_encoder = self.QA_model.mixture_enc\n self.function_encoder = self.QA_model.function_enc\n\n \n self.context_enc = nn.TransformerEncoder(\n nn.TransformerEncoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE),\n num_layers=context_enc_layer)\n #multi-track Transformer\n self.mt_trf = nn.ModuleDict({})\n for layer in range(function_dec_layer):\n self.mt_trf[f'track_layer_{layer}'] = TransformerEncoderLayerRPE(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n norm_first=True,\n max_len=18).to(DEVICE)\n self.mt_trf[f'time_layer_{layer}'] = nn.TransformerDecoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE)\n \n #positional encoding\n self.max_len = 1000\n position = torch.arange(self.max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(1, self.max_len, d_model)\n pe[0, :, 0::2] = torch.sin(position * div_term)\n pe[0, :, 1::2] = torch.cos(position * div_term)\n pe = pe.to(DEVICE)\n self.register_buffer('pe', pe)\n \n #decoder output module \n self.func_out_linear = nn.Linear(d_model, NUM_TIME_CODE)\n\n #constants\n self.d_model = d_model\n self.function_dec_layer = function_dec_layer\n self.func_res = function_resolution\n\n #loss function\n self.criterion = nn.CrossEntropyLoss(reduction='mean')\n\n\n def generate_square_subsequent_mask(self, sz=15):\n return torch.triu(torch.ones(sz, sz), diagonal=1).bool()\n\n\n def func_get_next_token(self, token, gt=None):\n #token: (batch, codebook_size)\n #gt: (bs,)\n if gt is None:\n idx = token.max(-1)[1]\n else:\n idx = gt\n token = torch.zeros_like(token, device=token.device)\n arange = torch.arange(token.shape[0], device=token.device).long()\n token[arange, idx] = 1\n return token.unsqueeze(1) #one-hot shaoe (batch, 1, ft_codebook_size)\n\n \n\n\n def run(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False):\n #mix: (batch, max_time, 256)\n #prog: (batch, max_track)\n #function: (batch, max_time, max_track, 8)\n #tm_mask: (batch, max_time)\n #tk_mask: (batch, max_track)\n #total_len: (batch, max_time)\n #abs_pos: (batch, max_time)\n #rel_pos: (batch, max_time)\n batch, max_time, _ = mix.shape\n _, max_track = prog.shape\n \n mix = mix + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix = mix + self.total_len_embedding(total_len)\n mix = mix + self.abs_pos_embedding(abs_pos)\n mix = mix + self.rel_pos_embedding(rel_pos)\n \n mix = self.context_enc(mix) #(batch, max_time, 256)\n mix = mix.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, max_time, 256)\n mix = mix.reshape(-1, 
max_time, self.d_model)\n\n function = function.permute(0, 1, 3, 2).reshape(batch, -1, max_track)\n func = self.func_embedding(function)#(batch, 8*max_time, max_track, d_model)\n \n func = torch.cat([\n self.start_embedding[prog].unsqueeze(1), #(batch, 1, max_track, d_model)\n func[:, :-1]], \n dim=1) #batch, 8*max_time, max_track, d_model\n\n func = func + self.prog_embedding(prog).unsqueeze(1) \n\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func, \n src_key_padding_mask=tk_mask.unsqueeze(1).repeat(1, self.func_res*max_time, 1).reshape(-1, max_track))\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, self.func_res*max_time, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(self.func_res*max_time).to(func.device),\n tgt_key_padding_mask=tm_mask.unsqueeze(1).repeat(1, max_track, 1).reshape(-1, max_time).repeat_interleave(self.func_res, dim=-1),\n memory=mix) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, 8*max_time, max_track, d_model)\n\n function_recon = self.func_out_linear(func)\n\n return function_recon, function\n\n \n\n def loss_function(self, function_recon, function_gt, tm_mask, tk_mask):\n\n mask = torch.logical_or(tm_mask.repeat_interleave(8, dim=-1).unsqueeze(-1), tk_mask.unsqueeze(1)) #(batch, 8*max_time, track) \n unmask = torch.logical_not(mask)\n\n function_loss = self.criterion(function_recon[unmask].reshape(-1, NUM_TIME_CODE), \n function_gt[unmask].reshape(-1))\n return function_loss\n \n\n def loss(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos):\n output = self.run(mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False)\n return self.loss_function(*output, tm_mask, tk_mask)\n \n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise NotImplementedError\n\n\n def run_autoregressive_greedy(self, mix, prog, function, total_len, abs_pos, rel_pos, blur=.5):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #function: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = 
mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n function = function.reshape(-1, 32)\n function = self.function_encoder.get_code_indices(function).reshape(batch, max_track, self.func_res)\n\n\n for idx in range(self.func_res, self.func_res*num_2bar):\n func = self.func_embedding(function) #*batch, max_track, 8, d_model\n func = func.permute(0, 2, 1, 3).reshape(batch, -1, max_track, self.d_model)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, num2bar-1, max_track, d_model)\n\n \n func_pred = self.func_out_linear(func[:, -1,]).max(-1)[1].unsqueeze(-1)\n\n function = torch.cat([function, func_pred], dim=-1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n function = function.reshape(batch, max_track, num_2bar, self.func_res).permute(0, 2, 1, 3)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n\n def run_autoregressive_nucleus(self, mix, prog, func_prompt, total_len, abs_pos, rel_pos, blur=.5, p=.1, t=1):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #func_prompt: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n start = self.start_embedding[prog].unsqueeze(1) #(batch, 1, max_track, dmodel)\n\n if func_prompt is not None:\n func_prompt = func_prompt.reshape(-1, 32)\n func_prompt = self.function_encoder.get_code_indices(func_prompt).reshape(batch, max_track, self.func_res).permute(0, 2, 1) #(batch, 8, max_track)\n #else:\n function = torch.empty((batch, 0, max_track)).long().to(mix.device)\n\n for idx in range(self.func_res*num_2bar):\n if (idx < self.func_res) and (func_prompt is not None):\n 
start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n function = torch.cat([function, func_prompt[:, idx: idx+1, :]], dim=1) \n continue\n else:\n func = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx+1, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx+1).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3)#(batch, num2bar-1, max_track, d_model)\n \n start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func_logits = self.func_out_linear(func[:, -1,]) / t\n filtered_func_logits = self.nucleus_filter(func_logits, p)\n func_probability = F.softmax(filtered_func_logits, dim=-1)\n func_pred = torch.multinomial(func_probability.reshape(-1, NUM_TIME_CODE), 1).reshape(func_probability.shape[:-1]).unsqueeze(1)\n\n function = torch.cat([function, func_pred], dim=1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n\n \n function = function.reshape(batch, num_2bar, self.func_res, max_track).permute(0, 1, 3, 2)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n def nucleus_filter(self, logits, p):\n #sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)\n #cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n cum_sum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n #sorted_indices_to_remove = cumulative_probs > p\n nucleus = cum_sum_probs < p\n # Shift the indices to the right to keep also the first token above the threshold\n #sorted_indices_to_remove = torch.cat([sorted_indices_to_remove.new_zeros(sorted_indices_to_remove.shape[:-1] + (1,)), sorted_indices_to_remove[..., :-1]], dim=-1)\n nucleus = torch.cat([nucleus.new_ones(nucleus.shape[:-1] + (1,)), nucleus[..., :-1]], dim=-1)\n nucleus = nucleus.gather(-1, sorted_indices.argsort(-1))\n\n logits[~nucleus] = float('-inf')\n return logits\n \n\n\n @classmethod\n def init_model(cls, pretrain_model_path=None, DEVICE='cuda:0'):\n \"\"\"Fast model initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n if pretrain_model_path is not None:\n vqQaA.load_state_dict(torch.load(pretrain_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(vqQaA.mixture_enc, vqQaA.function_enc, DEVICE=DEVICE).to(DEVICE)\n return model\n \n @classmethod\n def init_inference_model(cls, prior_model_path, QA_model_path, DEVICE='cuda:0'):\n \"\"\"Fast model 
initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n vqQaA.load_state_dict(torch.load(QA_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(inference=True, QA_model=vqQaA, DEVICE=DEVICE).to(DEVICE)\n model.load_state_dict(torch.load(prior_model_path), strict=False)\n return model" }, { "identifier": "SLAKH_CLASS_PROGRAMS", "path": "orchestrator/QA_dataset.py", "snippet": "SLAKH_CLASS_PROGRAMS = dict({\n 0: 'Acoustic Piano', #0\n 4: 'Electric Piano', #1\n 8: 'Chromatic Percussion',#2\n 16: 'Organ', #3\n 24: 'Acoustic Guitar', #4\n 26: 'Clean Electric Guitar', #5\n 29: 'Distorted Electric Guitar', #6\n 32: 'Acoustic Bass', #7\n 33: 'Electric Bass', #8\n 40: 'Violin', #9\n 41: 'Viola', #10\n 42: 'Cello', #11\n 43: 'Contrabass', #12\n 46: 'Orchestral Harp', #13\n 47: 'Timpani', #14\n 48: 'String Ensemble', #15\n 50: 'Synth Strings', #16\n 52: 'Choir and Voice', #17\n 55: 'Orchestral Hit', #18\n 56: 'Trumpet', #19\n 57: 'Trombone', #20\n 58: 'Tuba', #21\n 60: 'French Horn', #22\n 61: 'Brass Section', #23\n 64: 'Soprano/Alto Sax', #24\n 66: 'Tenor Sax', #25\n 67: 'Baritone Sax', #26\n 68: 'Oboe', #27\n 69: 'English Horn', #28\n 70: 'Bassoon', #29\n 71: 'Clarinet', #30\n 72: 'Pipe', #31\n 80: 'Synth Lead', #32\n 88: 'Synth Pad' #33\n})" }, { "identifier": "grid2pr", "path": "orchestrator/utils/format_convert.py", "snippet": "def grid2pr(grid, max_note_count=16, min_pitch=0, pitch_eos_ind=129):\n #grid: (time, max_simu_note, 6)\n if grid.shape[1] == max_note_count:\n grid = grid[:, 1:]\n pr = np.zeros((grid.shape[0], 128), dtype=int)\n for t in range(grid.shape[0]):\n for n in range(grid.shape[1]):\n note = grid[t, n]\n if note[0] == pitch_eos_ind:\n break\n pitch = note[0] + min_pitch\n dur = int(''.join([str(_) for _ in note[1:]]), 2) + 1\n pr[t, pitch] = dur\n return pr" }, { "identifier": "pr2grid", "path": "orchestrator/utils/format_convert.py", "snippet": "def pr2grid(pr_mat, max_note_count=16, max_pitch=127, min_pitch=0,\n pitch_pad_ind=130, dur_pad_ind=2,\n pitch_sos_ind=128, pitch_eos_ind=129):\n pr_mat3d = np.ones((len(pr_mat), max_note_count, 6), dtype=int) * dur_pad_ind\n pr_mat3d[:, :, 0] = pitch_pad_ind\n pr_mat3d[:, 0, 0] = pitch_sos_ind\n cur_idx = np.ones(len(pr_mat), dtype=int)\n for t, p in zip(*np.where(pr_mat != 0)):\n pr_mat3d[t, cur_idx[t], 0] = p - min_pitch\n binary = np.binary_repr(min(int(pr_mat[t, p]), 32) - 1, width=5)\n pr_mat3d[t, cur_idx[t], 1: 6] = \\\n np.fromstring(' '.join(list(binary)), dtype=int, sep=' ')\n if cur_idx[t] == max_note_count-1:\n continue\n cur_idx[t] += 1\n #print(cur_idx)\n pr_mat3d[np.arange(0, len(pr_mat)), cur_idx, 0] = pitch_eos_ind\n return pr_mat3d" }, { "identifier": "matrix2midi", "path": "orchestrator/utils/format_convert.py", "snippet": "def matrix2midi(matrices, programs, init_tempo=120, time_start=0):\n \"\"\"\n Reconstruct a multi-track midi from a 3D matrix of shape (Track. 
Time, 128).\n \"\"\"\n ACC = 16\n tracks = []\n for program in programs:\n track_recon = pyd.Instrument(program=int(program), is_drum=False, name=pyd.program_to_instrument_name(int(program)))\n tracks.append(track_recon)\n\n indices_track, indices_onset, indices_pitch = np.nonzero(matrices)\n alpha = 1 / (ACC // 4) * 60 / init_tempo #timestep between each quantization bin\n for idx in range(len(indices_track)):\n track_id = indices_track[idx]\n onset = indices_onset[idx]\n pitch = indices_pitch[idx]\n\n start = onset * alpha\n duration = matrices[track_id, onset, pitch] * alpha\n velocity = 100\n\n note_recon = pyd.Note(velocity=int(velocity), pitch=int(pitch), start=time_start + start, end=time_start + start + duration)\n tracks[track_id].notes.append(note_recon)\n \n midi_recon = pyd.PrettyMIDI(initial_tempo=init_tempo)\n midi_recon.instruments = tracks\n return midi_recon" }, { "identifier": "midi2matrix", "path": "orchestrator/utils/format_convert.py", "snippet": "def midi2matrix(midi, quaver):\n pr_matrices = []\n programs = []\n for track in midi.instruments:\n programs.append(track.program)\n pr_matrix = np.zeros((len(quaver), 128))\n for note in track.notes:\n note_start = np.argmin(np.abs(quaver - note.start))\n note_end = np.argmin(np.abs(quaver - note.end))\n if note_end == note_start:\n note_end = min(note_start + 1, len(quaver) - 1)\n pr_matrix[note_start, note.pitch] = note_end - note_start\n pr_matrices.append(pr_matrix)\n return np.array(pr_matrices), np.array(programs)" }, { "identifier": "TOTAL_LEN_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "TOTAL_LEN_BIN = np.array([4, 7, 12, 15, 20, 23, 28, 31, 36, 39, 44, 47, 52, 55, 60, 63, 68, 71, 76, 79, 84, 87, 92, 95, 100, 103, 108, 111, 116, 119, 124, 127, 132])" }, { "identifier": "ABS_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "ABS_POS_BIN = np.arange(129)" }, { "identifier": "REL_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "REL_POS_BIN = np.arange(128)" } ]
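The `grid2pr` and `pr2grid` snippets above are inverse conversions between a (time, 128) piano roll of note durations and the (time, max_note_count, 6) token grid the models consume. A minimal round-trip sketch under assumed defaults (16th-note grid, durations capped at 32 steps; note that `pr2grid` relies on the deprecated `np.fromstring`, so this assumes NumPy < 2.0):

```python
import numpy as np
from orchestrator.utils import grid2pr, pr2grid

pr = np.zeros((16, 128), dtype=int)  # one bar at 16th-note resolution
pr[0, 60] = 4                        # C4 lasting a quarter note
pr[4, 64] = 2                        # E4 lasting an eighth note

grid = pr2grid(pr)        # (16, 16, 6): pitch token + 5-bit binary duration per note
pr_recon = grid2pr(grid)  # back to the (16, 128) piano roll
assert (pr == pr_recon).all()
```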
import os
import pretty_midi as pyd
import numpy as np
import torch
import piano_arranger.format_converter as cvt
from torch.utils.data import DataLoader
from scipy.interpolate import interp1d
from tqdm import tqdm
from piano_arranger.acc_utils import split_phrases
from piano_arranger.models import DisentangleVAE
from piano_arranger.AccoMontage import find_by_length, dp_search, re_harmonization, get_texture_filter, ref_spotlight
from orchestrator import Slakh2100_Pop909_Dataset, collate_fn, compute_pr_feat, EMBED_PROGRAM_MAPPING, Prior
from orchestrator.QA_dataset import SLAKH_CLASS_PROGRAMS
from orchestrator.utils import grid2pr, pr2grid, matrix2midi, midi2matrix
from orchestrator.prior_dataset import TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN
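The `Prior.nucleus_filter` method in the context above implements top-p (nucleus) truncation of logits. A standalone sketch of the same cumulative-probability masking (an out-of-place variant, for illustration only):

```python
import torch
import torch.nn.functional as F

def nucleus_filter(logits: torch.Tensor, p: float) -> torch.Tensor:
    # keep the smallest set of tokens whose cumulative probability reaches p
    sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    nucleus = cum_probs < p
    # shift right so the first token that crosses the threshold is also kept
    nucleus = torch.cat([nucleus.new_ones(nucleus.shape[:-1] + (1,)),
                         nucleus[..., :-1]], dim=-1)
    nucleus = nucleus.gather(-1, sorted_indices.argsort(-1))  # unsort the mask
    return logits.masked_fill(~nucleus, float('-inf'))

probs = F.softmax(nucleus_filter(torch.randn(2, 128), p=0.1), dim=-1)
sample = torch.multinomial(probs, 1)  # sample from the truncated distribution
```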
18,914
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()}

def load_premise(DATA_FILE_ROOT, DEVICE):
    """Load AccoMontage Search Space"""
    print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...')
    data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True)
    melody = data['melody']
    acc = data['acc']
    chord = data['chord']
    vel = data['velocity']
    cc = data['cc']
    acc_pool = {}
    for LEN in tqdm(range(2, 13)):
        (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN)
        acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference)
    texture_filter = get_texture_filter(acc_pool)
    edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True)

    """Load Q&A Prompt Search Space"""
    print('loading orchestration prompt search space ...')
    slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set')
    dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train')
    loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE))
    REF = []
    REF_PROG = []
    REF_MIX = []
    for (_, prog, function, _, _, _) in loader:
        prog = prog[0, :]
        REF.extend([batch for batch in function])
        REF_PROG.extend([prog for _ in range(len(function))])
        REF_MIX.append(torch.sum(function, dim=1))
    REF_MIX = torch.cat(REF_MIX, dim=0)

    """Initialize orchestration model (Prior + Q&A)"""
    print('Initialize model ...')
    prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt')
    QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt')
    orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE)
    orchestrator.to(DEVICE)
    orchestrator.eval()
    piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda()
    piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt')))
    print('Finished.')
    return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX)

def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0):
    melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID)
    assert(len(melody_roll) == len(chord_roll))
    if NOTE_SHIFT != 0:
        melody_roll = melody_roll[int(NOTE_SHIFT*4):, :]
        chord_roll = chord_roll[int(NOTE_SHIFT*4):, :]
    if len(melody_roll) % 16 != 0:
        pad_len = (len(melody_roll)//16+1)*16-len(melody_roll)
        melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0)))
        melody_roll[-pad_len:, -1] = 1
        chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0)))
        chord_roll[-pad_len:, 0] = -1
        chord_roll[-pad_len:, -1] = -1
    CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0)
    LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1)  #T*142, quantized at 16th
    query_phrases = split_phrases(SEGMENTATION)  #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)]
    midi_len = len(LEADSHEET)//16
    anno_len = sum([item[1] for item in query_phrases])
    if midi_len > anno_len:
        LEADSHEET = LEADSHEET[: anno_len*16]
        CHORD_TABLE = CHORD_TABLE[: anno_len*4]
        print(f'Mismatch warning: Detected {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is truncated to {anno_len} bars.')
    elif midi_len < anno_len:
        pad_len = (anno_len - midi_len)*16
        LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0)))
        LEADSHEET[-pad_len:, 129] = 1
        CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0)))
        CHORD_TABLE[-pad_len//4:, 11] = -1
        CHORD_TABLE[-pad_len//4:, -1] = -1
        print(f'Mismatch warning: Detected {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.')
    melody_queries = []
    for item in query_phrases:
        start_bar = item[-1]
        length = item[-2]
        segment = LEADSHEET[start_bar*16: (start_bar+length)*16]
        melody_queries.append(segment)  #melody queries: list of T16*142, segmented by phrases
    return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases)

def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100):
    print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.')
    phrase_indice, chord_shift = dp_search(melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER)
    path = phrase_indice[0]
    shift = chord_shift[0]
    print('Re-harmonization begins ...')
    midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo)
    acc = np.array([grid2pr(matrix) for matrix in acc])
    print('Piano accompaniment generated!')
    return midi_recon, acc

def prompt_sampling(acc_piano, REF, REF_PROG, REF_MIX, DEVICE='cuda:0'):
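A hypothetical driver wiring the cropped functions above together; the demo paths, song name, segmentation string, and `PREFILTER` tuple below are illustrative placeholders, not values from this record, and the final `write` call assumes `re_harmonization` returns a PrettyMIDI object:

```python
piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), prompts = \
    load_premise('./data_files', DEVICE='cuda:0')
LEADSHEET, CHORD_TABLE, melody_queries, query_phrases = read_lead_sheet(
    './demo', 'my_song', SEGMENTATION='A8A8B8B8', NOTE_SHIFT=0)
midi_recon, acc = piano_arrangement(
    LEADSHEET, CHORD_TABLE, melody_queries, query_phrases,
    acc_pool, edge_weights, texture_filter, piano_arranger,
    PREFILTER=(4, 1), tempo=100)
midi_recon.write('piano_arrangement.mid')
```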
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()}

def load_premise(DATA_FILE_ROOT, DEVICE):
    """Load AccoMontage Search Space"""
    print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...')
    data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True)
    melody = data['melody']
    acc = data['acc']
    chord = data['chord']
    vel = data['velocity']
    cc = data['cc']
    acc_pool = {}
    for LEN in tqdm(range(2, 13)):
        (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN)
        acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference)
    texture_filter = get_texture_filter(acc_pool)
    edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True)

    """Load Q&A Prompt Search Space"""
    print('loading orchestration prompt search space ...')
    slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set')
    dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train')
    loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE))
    REF = []
    REF_PROG = []
    REF_MIX = []
    for (_, prog, function, _, _, _) in loader:
        prog = prog[0, :]
        REF.extend([batch for batch in function])
        REF_PROG.extend([prog for _ in range(len(function))])
        REF_MIX.append(torch.sum(function, dim=1))
    REF_MIX = torch.cat(REF_MIX, dim=0)

    """Initialize orchestration model (Prior + Q&A)"""
    print('Initialize model ...')
    prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt')
    QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt')
    orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE)
    orchestrator.to(DEVICE)
    orchestrator.eval()
    piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda()
    piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt')))
    print('Finished.')
    return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX)

def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0):
    melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID)
    assert(len(melody_roll) == len(chord_roll))
    if NOTE_SHIFT != 0:
        melody_roll = melody_roll[int(NOTE_SHIFT*4):, :]
        chord_roll = chord_roll[int(NOTE_SHIFT*4):, :]
    if len(melody_roll) % 16 != 0:
        pad_len = (len(melody_roll)//16+1)*16-len(melody_roll)
        melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0)))
        melody_roll[-pad_len:, -1] = 1
        chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0)))
        chord_roll[-pad_len:, 0] = -1
        chord_roll[-pad_len:, -1] = -1
    CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0)
    LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1)  #T*142, quantized at 16th
    query_phrases = split_phrases(SEGMENTATION)  #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)]
    midi_len = len(LEADSHEET)//16
    anno_len = sum([item[1] for item in query_phrases])
    if midi_len > anno_len:
        LEADSHEET = LEADSHEET[: anno_len*16]
        CHORD_TABLE = CHORD_TABLE[: anno_len*4]
        print(f'Mismatch warning: Detected {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is truncated to {anno_len} bars.')
    elif midi_len < anno_len:
        pad_len = (anno_len - midi_len)*16
        LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0)))
        LEADSHEET[-pad_len:, 129] = 1
        CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0)))
        CHORD_TABLE[-pad_len//4:, 11] = -1
        CHORD_TABLE[-pad_len//4:, -1] = -1
        print(f'Mismatch warning: Detected {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.')
    melody_queries = []
    for item in query_phrases:
        start_bar = item[-1]
        length = item[-2]
        segment = LEADSHEET[start_bar*16: (start_bar+length)*16]
        melody_queries.append(segment)  #melody queries: list of T16*142, segmented by phrases
    return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases)

def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100):
    print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.')
    phrase_indice, chord_shift = dp_search(melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER)
    path = phrase_indice[0]
    shift = chord_shift[0]
    print('Re-harmonization begins ...')
    midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo)
    acc = np.array([grid2pr(matrix) for matrix in acc])
    print('Piano accompaniment generated!')
    return midi_recon, acc

def prompt_sampling(acc_piano, REF, REF_PROG, REF_MIX, DEVICE='cuda:0'):
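`piano_arrangement` above returns `acc` as a (track, time, 128) stack of duration piano rolls; the `matrix2midi` snippet earlier in this record renders such a stack to MIDI. A minimal sketch with illustrative program numbers (0 = Acoustic Piano, 33 = Electric Bass in `SLAKH_CLASS_PROGRAMS`):

```python
import numpy as np
from orchestrator.utils import matrix2midi

acc = np.zeros((2, 32, 128), dtype=int)  # two tracks, two bars of 16th notes
acc[0, 0, 60] = 8    # track 0: C4 held for half a bar
acc[1, 0, 36] = 16   # track 1: C2 held for a full bar
midi = matrix2midi(acc, programs=[0, 33], init_tempo=100)
midi.write('demo.mid')
```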
ref_mix = torch.from_numpy(compute_pr_feat(acc_piano[0:1])[-1]).to(DEVICE)
9
2023-10-23 12:36:57+00:00
24k
liuqidong07/MOELoRA-peft
src/MLoRA/peft/peft_model.py
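The `Gate`/`GateN` snippets in the context below map a task embedding to softmax mixture weights over LoRA experts; a condensed, self-contained sketch of that gating (dimensions are illustrative):

```python
import torch
import torch.nn as nn

class Gate(nn.Module):
    def __init__(self, te_dim: int, expert_num: int):
        super().__init__()
        self.GateL = nn.Linear(te_dim, expert_num, bias=False)
        self.act = nn.Softmax(dim=1)  # dim 0 is the batch size

    def forward(self, task_em):
        return self.act(self.GateL(task_em))  # (batch, expert_num), rows sum to 1

gate = Gate(te_dim=64, expert_num=8)
weights = gate(torch.randn(4, 64))  # mixture weights for 4 tasks in a batch
```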
[ { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "Gate", "path": "src/MLoRA/peft/shared.py", "snippet": "class Gate(nn.Module):\n \"\"\"Gate\"\"\"\n def __init__(self, peft_config: PeftConfig, adapter_name=\"default\"):\n\n super().__init__()\n\n self.expert_num = peft_config.expert_num\n self.task_num = peft_config.task_num\n self.te_dim = peft_config.task_embedding_dim\n\n #self.lora_task_embedding = nn.Embedding(self.task_num+1, self.te_dim)# use an embedding instead of a linear layer\n self.GateL = nn.Linear(self.te_dim, self.expert_num, bias=False)\n self.act = nn.Softmax(dim=1) # dim 0 is the batch size\n \n def forward(self, task_em):\n\n #task_em = self.lora_task_embedding(x)\n y = self.GateL(task_em)\n y = self.act(y)\n\n return y" }, { "identifier": "GateN", "path": "src/MLoRA/peft/shared.py", "snippet": "class GateN(nn.Module):\n \"\"\"Gate New Function\"\"\"\n def __init__(self, expert_num, task_embedding_dim):\n\n super().__init__()\n\n self.expert_num = expert_num\n self.te_dim = task_embedding_dim\n\n self.GateL = nn.Linear(self.te_dim, self.expert_num, bias=False)\n self.act = nn.Softmax(dim=1) # dim 0 is the batch size\n \n def forward(self, task_em):\n\n #task_em = self.lora_task_embedding(x)\n y = self.GateL(task_em)\n y = self.act(y)\n\n return y" }, { "identifier": "AdaptionPromptModel", "path": "src/MLoRA/peft/tuners/adaption_prompt.py", "snippet": "class AdaptionPromptModel(nn.Module):\n \"\"\"\n Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.\n\n The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert\n trainable prompts with gates (for zero init).\n\n Notes on the multi-adapter pattern:\n - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter\n name.\n - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them\n in the dictionary, and replace them with the modules of the new adapter.\n - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the\n dictionary.\n - Disabling the adapter would also result in the modules being removed from the model.\n \"\"\"\n\n def __init__(self, model, configs: Dict, adapter_name: str):\n super().__init__()\n self.model = model\n # Store adapter configs by name.\n self._configs: Dict[str, AdaptionPromptConfig] = {}\n # Store lists of the parents of the affected attention modules by adapter name.\n # We keep references to the parents so we can swap the adapters in-and-out of the model.\n self._parents: Dict[str, List[nn.Module]] = {}\n # Store lists of 
cached AdaptedAttention modules by name.\n self._cached_adapters: Dict[str, List] = {}\n # The name of the currently active adapter.\n self._active_adapter = None\n # Whether the adapter is enabled.\n self._enabled = True\n self.forward = self.model.forward\n self.add_adapter(adapter_name, configs[adapter_name])\n self._mark_only_adaption_prompts_as_trainable()\n\n def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:\n \"\"\"Add an adapter with the given name and config.\"\"\"\n config = prepare_config(config, self.model)\n if adapter_name in self._configs:\n raise ValueError(f\"Adapter with name '{adapter_name}' already exists.\")\n\n parents = []\n for name, _ in self.model.named_modules():\n if name.endswith(config.target_modules):\n par, _, _ = _get_submodules(self.model, name)\n parents.append(par)\n if len(parents) < config.adapter_layers:\n raise ValueError(\n f\"Config specifies more adapter layers '{config.adapter_layers}'\"\n f\" than the model has '{len(parents)}'.\"\n )\n # Note that if the target modules are not in Sequential, ModuleList, or\n # some other PyTorch ordered container, the behavior is undefined as we\n # assume here that the order of the modules is the same as the order of\n # the transformer decoder layers.\n parents = parents[-config.adapter_layers :]\n self._parents[adapter_name] = parents\n\n # It is only None during initialization.\n # If it is disabled, we don't have to remove the modules.\n if self._active_adapter is not None and self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n self._active_adapter = adapter_name\n self._configs[adapter_name] = config\n self._create_adapted_attentions(config, parents)\n if not self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n\n if config.inference_mode:\n _freeze_adapter(self.model, adapter_name)\n\n def set_adapter(self, adapter_name: str) -> None:\n \"\"\"Set the model to use the adapter with the given name.\"\"\"\n if self._active_adapter == adapter_name:\n return\n if adapter_name not in self._configs:\n raise ValueError(f\"Adapter with name '{adapter_name}' does not exist.\")\n\n if self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n self._set_adapted_attentions(adapter_name)\n\n self._active_adapter = adapter_name\n\n def enable_adapter_layers(self):\n \"\"\"Enable adapter layers by swapping in cached AdaptedAttention modules.\"\"\"\n self._enabled = True\n self._set_adapted_attentions(self._active_adapter)\n\n def disable_adapter_layers(self):\n \"\"\"Disable adapter layers by swapping out AdaptedAttention modules.\"\"\"\n self._enabled = False\n self._remove_adapted_attentions(self._active_adapter)\n\n def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:\n \"\"\"Wrap LlamaAttention modules with newly created AdaptedAttention modules.\"\"\"\n for par in parents:\n attn = AdaptedAttention(\n model_type=self.model.config.model_type,\n adapter_len=config.adapter_len,\n model=getattr(par, config.target_modules),\n )\n setattr(par, config.target_modules, attn)\n\n def _set_adapted_attentions(self, adapter_name: str) -> None:\n \"\"\"Replace LlamaAttention modules with cached AdaptedAttention modules.\"\"\"\n cached = self._cached_adapters[adapter_name]\n del self._cached_adapters[adapter_name]\n config = self._configs[adapter_name]\n for i, par in enumerate(self._parents[adapter_name]):\n setattr(par, config.target_modules, cached[i])\n\n def _remove_adapted_attentions(self, 
adapter_name: str) -> None:\n \"\"\"Remove AdaptedAttention modules from the model and store them in the cache.\"\"\"\n config = self._configs[adapter_name]\n adapted_attentions = []\n for par in self._parents[adapter_name]:\n attn = getattr(par, config.target_modules)\n adapted_attentions.append(attn)\n setattr(par, config.target_modules, attn.model)\n self._cached_adapters[adapter_name] = adapted_attentions\n\n def _mark_only_adaption_prompts_as_trainable(self) -> None:\n \"\"\"Freeze all parameters of the model except the adaption prompts.\"\"\"\n for n, p in self.model.named_parameters():\n if not is_adaption_prompt_trainable(n):\n p.requires_grad = False\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n # This is necessary as e.g. causal models have various methods that we\n # don't want to re-implement here.\n return getattr(self.model, name)" }, { "identifier": "LoraModel", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer\n if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key) # parent: the parent module of target (e.g., SelfAttention), target: target module (e.g., nn.Linear()), target name: the name of target module (e.g., query_key_value)\n bias = target.bias is not None\n if isinstance(target, LoraLayer): # if the target is LoraLayer, only need to update the parameters\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else: # if not, get the lora parameter for create.\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else: # create based on the original module type\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs) # create the lora module; note this is not the raw nn.Linear, but the LoRA layer\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n \"\"\"substitute the original nn.Linear to new Linear (nn.Linear+LoRA block)\"\"\"\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None: # synchronize the state and device\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "AdaLoraModel", "path": "src/MLoRA/peft/tuners/adalora.py", "snippet": "class AdaLoraModel(LoraModel):\n \"\"\"\n Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. 
Paper:\n https://openreview.net/pdf?id=lq62uWRJjiY\n\n Args:\n model ([`transformers.PreTrainedModel`]): The model to be adapted.\n config ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n\n Returns:\n `torch.nn.Module`: The AdaLora model.\n\n Example::\n\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig\n >>> config = AdaLoraConfig(\n peft_type=\"ADALORA\", task_type=\"SEQ_2_SEQ_LM\", r=8, lora_alpha=32, target_modules=[\"q\", \"v\"],\n lora_dropout=0.01,\n )\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\") >>> model = AdaLoraModel(config, model)\n\n **Attributes**:\n - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n nn.Module.__init__(self)\n self.model = model\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_adalora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n traininable_mode_counter = 0\n for config in self.peft_config.values():\n if not config.inference_mode:\n traininable_mode_counter += 1\n\n if traininable_mode_counter > 1:\n raise ValueError(\n \"AdaLoraModel supports only 1 trainable adapter. \"\n \"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.\"\n )\n\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n else:\n self.trainable_adapter_name = adapter_name\n self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.init_r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = SVDLinear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def forward(self, *args, **kwargs):\n outputs = self.model.forward(*args, **kwargs)\n\n # Calculate the orthogonal regularization\n orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight\n assert orth_reg_weight > 0\n\n if hasattr(outputs, \"loss\"):\n regu_loss = 0\n num_param = 0\n for n, p in self.model.named_parameters():\n if (\"lora_A\" in n or \"lora_B\" in n) and self.trainable_adapter_name in n:\n para_cov = p @ p.T if \"lora_A\" in n else p.T @ p\n I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))\n I.requires_grad = False\n num_param += 1\n regu_loss += torch.norm(para_cov - I, p=\"fro\")\n regu_loss = regu_loss / num_param\n outputs.loss += orth_reg_weight * regu_loss\n return outputs\n\n def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):\n lora_config = self.peft_config[adapter_name]\n for name, rank_idx in rank_pattern.items():\n if isinstance(rank_idx, list):\n rank = sum(rank_idx)\n elif isinstance(rank_idx, torch.Tensor):\n rank_idx = rank_idx.view(-1)\n rank = rank_idx.sum().item()\n else:\n raise ValueError(\"Unexpected type of rank_idx\")\n key = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n _, target, _ = _get_submodules(self.model, key)\n lora_E_weights = target.lora_E[adapter_name][rank_idx]\n lora_A_weights = target.lora_A[adapter_name][rank_idx]\n lora_B_weights = target.lora_B[adapter_name][:, rank_idx]\n ranknum = target.ranknum[adapter_name]\n target.update_layer(\n adapter_name,\n rank,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n with torch.no_grad():\n if rank > 0:\n target.lora_E[adapter_name].copy_(lora_E_weights)\n target.lora_A[adapter_name].copy_(lora_A_weights)\n target.lora_B[adapter_name].copy_(lora_B_weights)\n # The scaling is exactly as the previous\n target.ranknum[adapter_name].copy_(ranknum)\n\n def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):\n for name, rank_idx in rank_pattern.items():\n rank = sum(rank_idx)\n prefix = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n for layer in [\"lora_E\", \"lora_A\", \"lora_B\"]:\n key = f\"base_model.model.{prefix}.{layer}.{adapter_name}\"\n if layer != \"lora_B\":\n state_dict[key] = (\n state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]\n )\n else:\n state_dict[key] = (\n state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]\n )\n return state_dict\n\n def update_and_allocate(self, global_step):\n lora_config = self.peft_config[self.trainable_adapter_name]\n # Update the importance score and allocate the budget\n if global_step < lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)\n if rank_pattern:\n lora_config.rank_pattern = rank_pattern\n # Finalize the budget allocation\n elif global_step == lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)\n # for some reason, this freezes the trainable parameters and nothing gets updated\n 
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)\n lora_config.rank_pattern = rank_pattern\n self.rankallocator.reset_ipt()\n # Currently using inefficient way to mask the unimportant weights using the rank pattern\n # due to problem mentioned above\n elif global_step > lora_config.total_step - lora_config.tfinal:\n self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)\n # Pass the function and do forward propagation\n else:\n return None\n\n @staticmethod\n def _prepare_adalora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[\n model_config[\"model_type\"]\n ]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config" }, { "identifier": "PromptEncoder", "path": "src/MLoRA/peft/tuners/p_tuning.py", "snippet": "class PromptEncoder(torch.nn.Module):\n \"\"\"\n The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.\n\n Args:\n config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.\n\n Example:\n\n ```py\n >>> from peft import PromptEncoder, PromptEncoderConfig\n\n >>> config = PromptEncoderConfig(\n ... peft_type=\"P_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_reparameterization_type=\"MLP\",\n ... encoder_hidden_size=768,\n ... )\n\n >>> prompt_encoder = PromptEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.\n - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.\n - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and\n `encoder_reparameterization_type=\"LSTM\"`.\n - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.\n - **input_size** (`int`) -- The input size of the prompt encoder.\n - **output_size** (`int`) -- The output size of the prompt encoder.\n - **hidden_size** (`int`) -- The hidden size of the prompt encoder.\n - **total_virtual_tokens** (`int`): The total number of virtual tokens of the\n prompt encoder.\n - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt\n encoder.\n\n\n Input shape: (`batch_size`, `total_virtual_tokens`)\n\n Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.token_dim = config.token_dim\n self.input_size = self.token_dim\n self.output_size = self.token_dim\n self.hidden_size = config.encoder_hidden_size\n self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.encoder_type = config.encoder_reparameterization_type\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n if not config.inference_mode:\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n lstm_dropout = config.encoder_dropout\n num_layers = config.encoder_num_layers\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n 
num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n warnings.warn(\n f\"for {self.encoder_type}, the `encoder_num_layers` is ignored. Exactly 2 MLP layers are used.\"\n )\n layers = [\n torch.nn.Linear(self.input_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.output_size),\n ]\n self.mlp_head = torch.nn.Sequential(*layers)\n\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n def forward(self, indices):\n input_embeds = self.embedding(indices)\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n output_embeds = self.mlp_head(input_embeds)\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n return output_embeds" }, { "identifier": "PrefixEncoder", "path": "src/MLoRA/peft/tuners/prefix_tuning.py", "snippet": "class PrefixEncoder(torch.nn.Module):\n r\"\"\"\n The `torch.nn` model to encode the prefix.\n\n Args:\n config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.\n\n Example:\n\n ```py\n >>> from peft import PrefixEncoder, PrefixTuningConfig\n\n >>> config = PrefixTuningConfig(\n ... peft_type=\"PREFIX_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_hidden_size=768,\n ... 
)\n >>> prefix_encoder = PrefixEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.\n - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if\n `prefix_projection` is `True`.\n - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.\n\n Input shape: (`batch_size`, `num_virtual_tokens`)\n\n Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.prefix_projection = config.prefix_projection\n token_dim = config.token_dim\n num_layers = config.num_layers\n encoder_hidden_size = config.encoder_hidden_size\n num_virtual_tokens = config.num_virtual_tokens\n if self.prefix_projection and not config.inference_mode:\n # Use a two-layer MLP to encode the prefix\n self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)\n self.transform = torch.nn.Sequential(\n torch.nn.Linear(token_dim, encoder_hidden_size),\n torch.nn.Tanh(),\n torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),\n )\n else:\n self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)\n\n def forward(self, prefix: torch.Tensor):\n if self.prefix_projection:\n prefix_tokens = self.embedding(prefix)\n past_key_values = self.transform(prefix_tokens)\n else:\n past_key_values = self.embedding(prefix)\n return past_key_values" }, { "identifier": "PromptEmbedding", "path": "src/MLoRA/peft/tuners/prompt_tuning.py", "snippet": "class PromptEmbedding(torch.nn.Module):\n \"\"\"\n The model to encode virtual tokens into prompt embeddings.\n\n Args:\n config ([`PromptTuningConfig`]): The configuration of the prompt embedding.\n word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.\n\n Example:\n\n ```py\n >>> from peft import PromptEmbedding, PromptTuningConfig\n\n >>> config = PromptTuningConfig(\n ... peft_type=\"PROMPT_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... prompt_tuning_init=\"TEXT\",\n ... prompt_tuning_init_text=\"Predict if sentiment of this review is positive, negative or neutral\",\n ... tokenizer_name_or_path=\"t5-base\",\n ... 
)\n\n >>> # t5_model.shared is the word embeddings of the base model\n >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)\n ```\n\n Input Shape: (`batch_size`, `total_virtual_tokens`)\n\n Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config, word_embeddings):\n super().__init__()\n\n total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)\n if config.prompt_tuning_init == PromptTuningInit.TEXT:\n from transformers import AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path)\n init_text = config.prompt_tuning_init_text\n init_token_ids = tokenizer(init_text)[\"input_ids\"]\n # Trim or iterate until num_text_tokens matches total_virtual_tokens\n num_text_tokens = len(init_token_ids)\n if num_text_tokens > total_virtual_tokens:\n init_token_ids = init_token_ids[:total_virtual_tokens]\n elif num_text_tokens < total_virtual_tokens:\n num_reps = math.ceil(total_virtual_tokens / num_text_tokens)\n init_token_ids = init_token_ids * num_reps\n init_token_ids = init_token_ids[:total_virtual_tokens]\n\n word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone()\n word_embedding_weights = word_embedding_weights.to(torch.float32)\n self.embedding.weight = torch.nn.Parameter(word_embedding_weights)\n\n def forward(self, indices):\n # Just get embeddings\n prompt_embeddings = self.embedding(indices)\n return prompt_embeddings" }, { "identifier": "MMOELoraModelS", "path": "src/MLoRA/peft/tuners/mmoeloraS.py", "snippet": "class MMOELoraModelS(MMOELoraModel):\n\n def __init__(self, model, config, adapter_name):\n\n super().__init__(model, config, adapter_name)\n\n\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. \"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n \"task_num\": lora_config.task_num,\n \"task_embedding_dim\": lora_config.task_embedding_dim,\n \"expert_num\": lora_config.expert_num,\n }\n key_list = [key for key, _ in self.model.named_modules()] # all module in raw model\n for key in key_list:\n # find the corresponding modules. 
target module has been split into list.\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, MMOELoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n raise NotImplementedError\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = MMOELoraLinearS(adapter_name, in_features, out_features, \n bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )" }, { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "PeftType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\"" }, { "identifier": "PromptLearningConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" }, { "identifier": "TaskType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"\n CAUSAL_LMS = \"CAUSAL_LMS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "path": "src/MLoRA/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {\n \"bloom\": bloom_model_postprocess_past_key_value,\n}" }, { "identifier": "WEIGHTS_NAME", "path": "src/MLoRA/peft/utils/other.py", "snippet": "WEIGHTS_NAME = \"adapter_model.bin\"" }, { "identifier": "_set_trainable", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _set_trainable(model, adapter_name):\n key_list = [key for key, _ in model.named_modules()]\n for key in key_list:\n target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)\n if 
target_module_found:\n parent, target, target_name = _get_submodules(model, key)\n if isinstance(target, ModulesToSaveWrapper):\n target.update(adapter_name)\n else:\n for param in target.parameters():\n param.requires_grad = True\n setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name))" }, { "identifier": "shift_tokens_right", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids\n pad_token_id (`int`): The id of the `padding` token.\n decoder_start_token_id (`int`): The id of the `start` token.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n if pad_token_id is None:\n raise ValueError(\"self.model.config.pad_token_id has to be defined.\")\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids" }, { "identifier": "_set_adapter", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _set_adapter(model, adapter_name):\n for module in model.modules():\n if isinstance(module, ModulesToSaveWrapper):\n module.active_adapter = adapter_name" }, { "identifier": "get_peft_model_state_dict", "path": "src/MLoRA/peft/utils/save_and_load.py", "snippet": "def get_peft_model_state_dict(model, state_dict=None, adapter_name=\"default\"):\n \"\"\"\n Get the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,\n the model should be the underlying model/unwrapped model (i.e. model.module).\n state_dict (`dict`, *optional*, defaults to `None`):\n The state dict of the model. 
If not provided, the state dict of the model\n will be used.\n \"\"\"\n config = model.peft_config[adapter_name]\n if state_dict is None:\n state_dict = model.state_dict()\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA,\n PeftType.MMOELORAS):\n # to_return = lora_state_dict(model, bias=model.peft_config.bias)\n # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`\n # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP\n bias = config.bias\n if bias == \"none\": # filter out all lora parameters\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n for k in state_dict:\n if \"lora_\" in k:\n to_return[k] = state_dict[k]\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n if bias_name in state_dict:\n to_return[bias_name] = state_dict[bias_name]\n else:\n raise NotImplementedError\n to_return = {k: v for k, v in to_return.items() if ((\"lora_\" in k and adapter_name in k) or (\"bias\" in k))}\n\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n rank_pattern = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in rank_pattern.items()}\n config.rank_pattern = rank_pattern\n to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)\n\n elif config.peft_type == PeftType.ADAPTION_PROMPT:\n to_return = {k: state_dict[k] for k in state_dict if k.split(\".\")[-1].startswith(\"adaption_\")}\n elif isinstance(config, PromptLearningConfig):\n to_return = {}\n if config.inference_mode:\n prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight\n else:\n prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)\n to_return[\"prompt_embeddings\"] = prompt_embeddings\n else:\n raise NotImplementedError\n if model.modules_to_save is not None:\n for key, value in state_dict.items():\n if any(f\"{module_name}.modules_to_save.{adapter_name}\" in key for module_name in model.modules_to_save):\n to_return[key.replace(\"modules_to_save.\", \"\")] = value\n\n to_return = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in to_return.items()}\n return to_return" }, { "identifier": "set_peft_model_state_dict", "path": "src/MLoRA/peft/utils/save_and_load.py", "snippet": "def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name=\"default\"):\n \"\"\"\n Set the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model.\n peft_model_state_dict (`dict`): The state dict of the Peft model.\n \"\"\"\n config = model.peft_config[adapter_name]\n state_dict = {}\n if model.modules_to_save is not None:\n for key, value in peft_model_state_dict.items():\n if any(module_name in key for module_name in model.modules_to_save):\n for module_name in model.modules_to_save:\n if module_name in key:\n key = key.replace(module_name, f\"{module_name}.modules_to_save.{adapter_name}\")\n break\n state_dict[key] = value\n else:\n state_dict = peft_model_state_dict\n\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA,\n PeftType.MMOELORAS):\n peft_model_state_dict = {}\n for k, v in state_dict.items():\n if \"lora_\" in k:\n suffix = k.split(\"lora_\")[1]\n if \".\" in suffix:\n suffix_to_replace = \".\".join(suffix.split(\".\")[1:])\n k = k.replace(suffix_to_replace, f\"{adapter_name}.{suffix_to_replace}\")\n else:\n k = 
f\"{k}.{adapter_name}\"\n peft_model_state_dict[k] = v\n else:\n peft_model_state_dict[k] = v\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)\n elif isinstance(config, PromptLearningConfig) or config.peft_type == PeftType.ADAPTION_PROMPT:\n peft_model_state_dict = state_dict\n else:\n raise NotImplementedError\n\n model.load_state_dict(peft_model_state_dict, strict=False)\n if isinstance(config, PromptLearningConfig):\n model.prompt_encoder[adapter_name].embedding.load_state_dict(\n {\"weight\": peft_model_state_dict[\"prompt_embeddings\"]}, strict=True\n )" } ]
import inspect
import os
import warnings

import torch
import torch.nn as nn
from contextlib import contextmanager
from accelerate import dispatch_model, infer_auto_device_map
from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules
from accelerate.utils import get_balanced_memory
from huggingface_hub import hf_hub_download
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers import PreTrainedModel
from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput
from transformers.utils import PushToHubMixin

from .shared import Gate, GateN
from .tuners import (
    AdaLoraModel,
    AdaptionPromptModel,
    LoraModel,
    PrefixEncoder,
    PromptEmbedding,
    PromptEncoder,
    MMOELoraModelS,
)
from .utils import (
    TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
    WEIGHTS_NAME,
    PeftConfig,
    PeftType,
    PromptLearningConfig,
    TaskType,
    _set_adapter,
    _set_trainable,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
    shift_tokens_right,
)
from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
14482
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
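Both code fields above stop inside `PEFT_TYPE_TO_MODEL_MAPPING`, a plain dispatch table from `PeftType` members to tuner classes. As a hedged sketch of the usual consumption pattern for such a table (stand-in classes, not the real tuners):

```py
from enum import Enum

class PeftType(str, Enum):
    LORA = "LORA"
    PREFIX_TUNING = "PREFIX_TUNING"

# DummyLora/DummyPrefix stand in for the real LoraModel/PrefixEncoder classes.
class DummyLora: ...
class DummyPrefix: ...

PEFT_TYPE_TO_MODEL_MAPPING = {
    PeftType.LORA: DummyLora,
    PeftType.PREFIX_TUNING: DummyPrefix,
}

# Pick the tuner class from a config's peft_type and instantiate it.
tuner_cls = PEFT_TYPE_TO_MODEL_MAPPING[PeftType.LORA]
print(tuner_cls().__class__.__name__)  # DummyLora
```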
PeftType.ADALORA: AdaLoraModel,
5
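Here `next_line` completes the mapping begun in `cropped_code`, and `gold_snippet_index` above presumably selects the context snippet a model needs in order to predict it. A hedged sketch of resolving that index (field names taken from this record; the context list below is a placeholder, and 0-based indexing is assumed):

```py
# Assumes gold_snippet_index is a 0-based index into the record's context list.
record = {
    "context": [{"identifier": f"snippet_{i}", "snippet": "..."} for i in range(8)],
    "next_line": "PeftType.ADALORA: AdaLoraModel,",
    "gold_snippet_index": 5,
}
gold_snippet = record["context"][record["gold_snippet_index"]]
print(gold_snippet["identifier"])  # snippet_5 -- the snippet the completion relies on
```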
2023-10-19 10:55:50+00:00
24k
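A record like this is typically scored by comparing a model's predicted next line against `next_line`. A minimal whitespace-normalized exact-match check (the corpus does not spell out its normalization rules, so this is an assumption):

```py
def exact_match(prediction: str, gold: str) -> bool:
    # Strip leading/trailing whitespace before comparing;
    # the actual benchmark normalization may differ.
    return prediction.strip() == gold.strip()

print(exact_match("  PeftType.ADALORA: AdaLoraModel,",
                  "PeftType.ADALORA: AdaLoraModel,"))  # True
```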
YuroFR/freqtrade-modded-crypto-trading-bot
tests/test_integration.py
[ { "identifier": "ExitCheckTuple", "path": "freqtrade/enums/exitchecktuple.py", "snippet": "class ExitCheckTuple:\n \"\"\"\n NamedTuple for Exit type + reason\n \"\"\"\n exit_type: ExitType\n exit_reason: str = ''\n\n def __init__(self, exit_type: ExitType, exit_reason: str = ''):\n self.exit_type = exit_type\n self.exit_reason = exit_reason or exit_type.value\n\n @property\n def exit_flag(self):\n return self.exit_type != ExitType.NONE\n\n def __eq__(self, other):\n return self.exit_type == other.exit_type and self.exit_reason == other.exit_reason\n\n def __repr__(self):\n return f\"ExitCheckTuple({self.exit_type}, {self.exit_reason})\"" }, { "identifier": "ExitType", "path": "freqtrade/enums/exittype.py", "snippet": "class ExitType(Enum):\n \"\"\"\n Enum to distinguish between exit reasons\n \"\"\"\n ROI = \"roi\"\n STOP_LOSS = \"stop_loss\"\n STOPLOSS_ON_EXCHANGE = \"stoploss_on_exchange\"\n TRAILING_STOP_LOSS = \"trailing_stop_loss\"\n LIQUIDATION = \"liquidation\"\n EXIT_SIGNAL = \"exit_signal\"\n FORCE_EXIT = \"force_exit\"\n EMERGENCY_EXIT = \"emergency_exit\"\n CUSTOM_EXIT = \"custom_exit\"\n PARTIAL_EXIT = \"partial_exit\"\n SOLD_ON_EXCHANGE = \"sold_on_exchange\"\n NONE = \"\"\n\n def __str__(self):\n # explicitly convert to String to help with exporting data.\n return self.value" }, { "identifier": "TradingMode", "path": "freqtrade/enums/tradingmode.py", "snippet": "class TradingMode(str, Enum):\n \"\"\"\n Enum to distinguish between\n spot, margin, futures or any other trading method\n \"\"\"\n SPOT = \"spot\"\n MARGIN = \"margin\"\n FUTURES = \"futures\"" }, { "identifier": "Trade", "path": "freqtrade/persistence/trade_model.py", "snippet": "class Trade(ModelBase, LocalTrade):\n \"\"\"\n Trade database model.\n Also handles updating and querying trades\n\n Note: Fields must be aligned with LocalTrade class\n \"\"\"\n __tablename__ = 'trades'\n session: ClassVar[SessionType]\n\n use_db: bool = True\n\n id: Mapped[int] = mapped_column(Integer, primary_key=True) # type: ignore\n\n orders: Mapped[List[Order]] = relationship(\n \"Order\", order_by=\"Order.id\", cascade=\"all, delete-orphan\", lazy=\"selectin\",\n innerjoin=True) # type: ignore\n\n exchange: Mapped[str] = mapped_column(String(25), nullable=False) # type: ignore\n pair: Mapped[str] = mapped_column(String(25), nullable=False, index=True) # type: ignore\n base_currency: Mapped[Optional[str]] = mapped_column(String(25), nullable=True) # type: ignore\n stake_currency: Mapped[Optional[str]] = mapped_column(String(25), nullable=True) # type: ignore\n is_open: Mapped[bool] = mapped_column(nullable=False, default=True, index=True) # type: ignore\n fee_open: Mapped[float] = mapped_column(Float(), nullable=False, default=0.0) # type: ignore\n fee_open_cost: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore\n fee_open_currency: Mapped[Optional[str]] = mapped_column(\n String(25), nullable=True) # type: ignore\n fee_close: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=False, default=0.0) # type: ignore\n fee_close_cost: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore\n fee_close_currency: Mapped[Optional[str]] = mapped_column(\n String(25), nullable=True) # type: ignore\n open_rate: Mapped[float] = mapped_column(Float()) # type: ignore\n open_rate_requested: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True) # type: ignore\n # open_trade_value - calculated via _calc_open_trade_value\n open_trade_value: Mapped[float] = 
mapped_column(Float(), nullable=True) # type: ignore\n close_rate: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore\n close_rate_requested: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore\n realized_profit: Mapped[float] = mapped_column(\n Float(), default=0.0, nullable=True) # type: ignore\n close_profit: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore\n close_profit_abs: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore\n stake_amount: Mapped[float] = mapped_column(Float(), nullable=False) # type: ignore\n max_stake_amount: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore\n amount: Mapped[float] = mapped_column(Float()) # type: ignore\n amount_requested: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore\n open_date: Mapped[datetime] = mapped_column(\n nullable=False, default=datetime.utcnow) # type: ignore\n close_date: Mapped[Optional[datetime]] = mapped_column() # type: ignore\n # absolute value of the stop loss\n stop_loss: Mapped[float] = mapped_column(Float(), nullable=True, default=0.0) # type: ignore\n # percentage value of the stop loss\n stop_loss_pct: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore\n # absolute value of the initial stop loss\n initial_stop_loss: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True, default=0.0) # type: ignore\n # percentage value of the initial stop loss\n initial_stop_loss_pct: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True) # type: ignore\n is_stop_loss_trailing: Mapped[bool] = mapped_column(\n nullable=False, default=False) # type: ignore\n # stoploss order id which is on exchange\n stoploss_order_id: Mapped[Optional[str]] = mapped_column(\n String(255), nullable=True, index=True) # type: ignore\n # last update time of the stoploss order on exchange\n stoploss_last_update: Mapped[Optional[datetime]] = mapped_column(nullable=True) # type: ignore\n # absolute value of the highest reached price\n max_rate: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True, default=0.0) # type: ignore\n # Lowest price reached\n min_rate: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore\n exit_reason: Mapped[Optional[str]] = mapped_column(\n String(CUSTOM_TAG_MAX_LENGTH), nullable=True) # type: ignore\n exit_order_status: Mapped[Optional[str]] = mapped_column(\n String(100), nullable=True) # type: ignore\n strategy: Mapped[Optional[str]] = mapped_column(String(100), nullable=True) # type: ignore\n enter_tag: Mapped[Optional[str]] = mapped_column(\n String(CUSTOM_TAG_MAX_LENGTH), nullable=True) # type: ignore\n timeframe: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) # type: ignore\n\n trading_mode: Mapped[TradingMode] = mapped_column(\n Enum(TradingMode), nullable=True) # type: ignore\n amount_precision: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True) # type: ignore\n price_precision: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore\n precision_mode: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) # type: ignore\n contract_size: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore\n\n # Leverage trading properties\n leverage: Mapped[float] = mapped_column(Float(), nullable=True, default=1.0) # type: ignore\n is_short: Mapped[bool] = mapped_column(nullable=False, default=False) # type: ignore\n liquidation_price: Mapped[Optional[float]] = mapped_column(\n Float(), 
nullable=True) # type: ignore\n\n # Margin Trading Properties\n interest_rate: Mapped[float] = mapped_column(\n Float(), nullable=False, default=0.0) # type: ignore\n\n # Futures properties\n funding_fees: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True, default=None) # type: ignore\n funding_fee_running: Mapped[Optional[float]] = mapped_column(\n Float(), nullable=True, default=None) # type: ignore\n\n def __init__(self, **kwargs):\n from_json = kwargs.pop('__FROM_JSON', None)\n super().__init__(**kwargs)\n if not from_json:\n # Skip recalculation when loading from json\n self.realized_profit = 0\n self.recalc_open_trade_value()\n\n @validates('enter_tag', 'exit_reason')\n def validate_string_len(self, key, value):\n max_len = getattr(self.__class__, key).prop.columns[0].type.length\n if value and len(value) > max_len:\n return value[:max_len]\n return value\n\n def delete(self) -> None:\n\n for order in self.orders:\n Order.session.delete(order)\n\n Trade.session.delete(self)\n Trade.commit()\n\n @staticmethod\n def commit():\n Trade.session.commit()\n\n @staticmethod\n def rollback():\n Trade.session.rollback()\n\n @staticmethod\n def get_trades_proxy(*, pair: Optional[str] = None, is_open: Optional[bool] = None,\n open_date: Optional[datetime] = None,\n close_date: Optional[datetime] = None,\n ) -> List['LocalTrade']:\n \"\"\"\n Helper function to query Trades.j\n Returns a List of trades, filtered on the parameters given.\n In live mode, converts the filter to a database query and returns all rows\n In Backtest mode, uses filters on Trade.trades to get the result.\n\n :return: unsorted List[Trade]\n \"\"\"\n if Trade.use_db:\n trade_filter = []\n if pair:\n trade_filter.append(Trade.pair == pair)\n if open_date:\n trade_filter.append(Trade.open_date > open_date)\n if close_date:\n trade_filter.append(Trade.close_date > close_date)\n if is_open is not None:\n trade_filter.append(Trade.is_open.is_(is_open))\n return cast(List[LocalTrade], Trade.get_trades(trade_filter).all())\n else:\n return LocalTrade.get_trades_proxy(\n pair=pair, is_open=is_open,\n open_date=open_date,\n close_date=close_date\n )\n\n @staticmethod\n def get_trades_query(trade_filter=None, include_orders: bool = True) -> Select:\n \"\"\"\n Helper function to query Trades using filters.\n NOTE: Not supported in Backtesting.\n :param trade_filter: Optional filter to apply to trades\n Can be either a Filter object, or a List of filters\n e.g. `(trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True),])`\n e.g. `(trade_filter=Trade.id == trade_id)`\n :return: unsorted query object\n \"\"\"\n if not Trade.use_db:\n raise NotImplementedError('`Trade.get_trades()` not supported in backtesting mode.')\n if trade_filter is not None:\n if not isinstance(trade_filter, list):\n trade_filter = [trade_filter]\n this_query = select(Trade).filter(*trade_filter)\n else:\n this_query = select(Trade)\n if not include_orders:\n # Don't load order relations\n # Consider using noload or raiseload instead of lazyload\n this_query = this_query.options(lazyload(Trade.orders))\n return this_query\n\n @staticmethod\n def get_trades(trade_filter=None, include_orders: bool = True) -> ScalarResult['Trade']:\n \"\"\"\n Helper function to query Trades using filters.\n NOTE: Not supported in Backtesting.\n :param trade_filter: Optional filter to apply to trades\n Can be either a Filter object, or a List of filters\n e.g. `(trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True),])`\n e.g. 
`(trade_filter=Trade.id == trade_id)`\n :return: unsorted query object\n \"\"\"\n query = Trade.get_trades_query(trade_filter, include_orders)\n # this sholud remain split. if use_db is False, session is not available and the above will\n # raise an exception.\n return Trade.session.scalars(query)\n\n @staticmethod\n def get_open_trades_without_assigned_fees():\n \"\"\"\n Returns all open trades which don't have open fees set correctly\n NOTE: Not supported in Backtesting.\n \"\"\"\n return Trade.get_trades([Trade.fee_open_currency.is_(None),\n Trade.orders.any(),\n Trade.is_open.is_(True),\n ]).all()\n\n @staticmethod\n def get_closed_trades_without_assigned_fees():\n \"\"\"\n Returns all closed trades which don't have fees set correctly\n NOTE: Not supported in Backtesting.\n \"\"\"\n return Trade.get_trades([Trade.fee_close_currency.is_(None),\n Trade.orders.any(),\n Trade.is_open.is_(False),\n ]).all()\n\n @staticmethod\n def get_total_closed_profit() -> float:\n \"\"\"\n Retrieves total realized profit\n \"\"\"\n if Trade.use_db:\n total_profit: float = Trade.session.execute(\n select(func.sum(Trade.close_profit_abs)).filter(Trade.is_open.is_(False))\n ).scalar_one()\n else:\n total_profit = sum(t.close_profit_abs # type: ignore\n for t in LocalTrade.get_trades_proxy(is_open=False))\n return total_profit or 0\n\n @staticmethod\n def total_open_trades_stakes() -> float:\n \"\"\"\n Calculates total invested amount in open trades\n in stake currency\n \"\"\"\n if Trade.use_db:\n total_open_stake_amount = Trade.session.scalar(\n select(func.sum(Trade.stake_amount)).filter(Trade.is_open.is_(True))\n )\n else:\n total_open_stake_amount = sum(\n t.stake_amount for t in LocalTrade.get_trades_proxy(is_open=True))\n return total_open_stake_amount or 0\n\n @staticmethod\n def get_overall_performance(minutes=None) -> List[Dict[str, Any]]:\n \"\"\"\n Returns List of dicts containing all Trades, including profit and trade count\n NOTE: Not supported in Backtesting.\n \"\"\"\n filters: List = [Trade.is_open.is_(False)]\n if minutes:\n start_date = datetime.now(timezone.utc) - timedelta(minutes=minutes)\n filters.append(Trade.close_date >= start_date)\n\n pair_rates = Trade.session.execute(\n select(\n Trade.pair,\n func.sum(Trade.close_profit).label('profit_sum'),\n func.sum(Trade.close_profit_abs).label('profit_sum_abs'),\n func.count(Trade.pair).label('count')\n ).filter(*filters)\n .group_by(Trade.pair)\n .order_by(desc('profit_sum_abs'))\n ).all()\n\n return [\n {\n 'pair': pair,\n 'profit_ratio': profit,\n 'profit': round(profit * 100, 2), # Compatibility mode\n 'profit_pct': round(profit * 100, 2),\n 'profit_abs': profit_abs,\n 'count': count\n }\n for pair, profit, profit_abs, count in pair_rates\n ]\n\n @staticmethod\n def get_enter_tag_performance(pair: Optional[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Returns List of dicts containing all Trades, based on buy tag performance\n Can either be average for all pairs or a specific pair provided\n NOTE: Not supported in Backtesting.\n \"\"\"\n\n filters: List = [Trade.is_open.is_(False)]\n if (pair is not None):\n filters.append(Trade.pair == pair)\n\n enter_tag_perf = Trade.session.execute(\n select(\n Trade.enter_tag,\n func.sum(Trade.close_profit).label('profit_sum'),\n func.sum(Trade.close_profit_abs).label('profit_sum_abs'),\n func.count(Trade.pair).label('count')\n ).filter(*filters)\n .group_by(Trade.enter_tag)\n .order_by(desc('profit_sum_abs'))\n ).all()\n\n return [\n {\n 'enter_tag': enter_tag if enter_tag is not None else 
\"Other\",\n 'profit_ratio': profit,\n 'profit_pct': round(profit * 100, 2),\n 'profit_abs': profit_abs,\n 'count': count\n }\n for enter_tag, profit, profit_abs, count in enter_tag_perf\n ]\n\n @staticmethod\n def get_exit_reason_performance(pair: Optional[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Returns List of dicts containing all Trades, based on exit reason performance\n Can either be average for all pairs or a specific pair provided\n NOTE: Not supported in Backtesting.\n \"\"\"\n\n filters: List = [Trade.is_open.is_(False)]\n if (pair is not None):\n filters.append(Trade.pair == pair)\n sell_tag_perf = Trade.session.execute(\n select(\n Trade.exit_reason,\n func.sum(Trade.close_profit).label('profit_sum'),\n func.sum(Trade.close_profit_abs).label('profit_sum_abs'),\n func.count(Trade.pair).label('count')\n ).filter(*filters)\n .group_by(Trade.exit_reason)\n .order_by(desc('profit_sum_abs'))\n ).all()\n\n return [\n {\n 'exit_reason': exit_reason if exit_reason is not None else \"Other\",\n 'profit_ratio': profit,\n 'profit_pct': round(profit * 100, 2),\n 'profit_abs': profit_abs,\n 'count': count\n }\n for exit_reason, profit, profit_abs, count in sell_tag_perf\n ]\n\n @staticmethod\n def get_mix_tag_performance(pair: Optional[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Returns List of dicts containing all Trades, based on entry_tag + exit_reason performance\n Can either be average for all pairs or a specific pair provided\n NOTE: Not supported in Backtesting.\n \"\"\"\n\n filters: List = [Trade.is_open.is_(False)]\n if (pair is not None):\n filters.append(Trade.pair == pair)\n mix_tag_perf = Trade.session.execute(\n select(\n Trade.id,\n Trade.enter_tag,\n Trade.exit_reason,\n func.sum(Trade.close_profit).label('profit_sum'),\n func.sum(Trade.close_profit_abs).label('profit_sum_abs'),\n func.count(Trade.pair).label('count')\n ).filter(*filters)\n .group_by(Trade.id)\n .order_by(desc('profit_sum_abs'))\n ).all()\n\n return_list: List[Dict] = []\n for id, enter_tag, exit_reason, profit, profit_abs, count in mix_tag_perf:\n enter_tag = enter_tag if enter_tag is not None else \"Other\"\n exit_reason = exit_reason if exit_reason is not None else \"Other\"\n\n if (exit_reason is not None and enter_tag is not None):\n mix_tag = enter_tag + \" \" + exit_reason\n i = 0\n if not any(item[\"mix_tag\"] == mix_tag for item in return_list):\n return_list.append({'mix_tag': mix_tag,\n 'profit': profit,\n 'profit_pct': round(profit * 100, 2),\n 'profit_abs': profit_abs,\n 'count': count})\n else:\n while i < len(return_list):\n if return_list[i][\"mix_tag\"] == mix_tag:\n return_list[i] = {\n 'mix_tag': mix_tag,\n 'profit': profit + return_list[i][\"profit\"],\n 'profit_pct': round(profit + return_list[i][\"profit\"] * 100, 2),\n 'profit_abs': profit_abs + return_list[i][\"profit_abs\"],\n 'count': 1 + return_list[i][\"count\"]}\n i += 1\n\n return return_list\n\n @staticmethod\n def get_best_pair(start_date: datetime = datetime.fromtimestamp(0)):\n \"\"\"\n Get best pair with closed trade.\n NOTE: Not supported in Backtesting.\n :returns: Tuple containing (pair, profit_sum)\n \"\"\"\n best_pair = Trade.session.execute(\n select(\n Trade.pair,\n func.sum(Trade.close_profit).label('profit_sum')\n ).filter(Trade.is_open.is_(False) & (Trade.close_date >= start_date))\n .group_by(Trade.pair)\n .order_by(desc('profit_sum'))\n ).first()\n\n return best_pair\n\n @staticmethod\n def get_trading_volume(start_date: datetime = datetime.fromtimestamp(0)) -> float:\n \"\"\"\n Get Trade volume based on Orders\n 
NOTE: Not supported in Backtesting.\n :returns: Tuple containing (pair, profit_sum)\n \"\"\"\n trading_volume = Trade.session.execute(\n select(\n func.sum(Order.cost).label('volume')\n ).filter(\n Order.order_filled_date >= start_date,\n Order.status == 'closed'\n )).scalar_one()\n return trading_volume" }, { "identifier": "Order", "path": "freqtrade/persistence/models.py", "snippet": "REQUEST_ID_CTX_KEY: Final[str] = 'request_id'\n_SQL_DOCS_URL = 'http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls'\ndef get_request_or_thread_id() -> Optional[str]:\ndef init_db(db_url: str) -> None:" }, { "identifier": "RPC", "path": "freqtrade/rpc/rpc.py", "snippet": "class RPC:\n \"\"\"\n RPC class can be used to have extra feature, like bot data, and access to DB data\n \"\"\"\n # Bind _fiat_converter if needed\n _fiat_converter: Optional[CryptoToFiatConverter] = None\n\n def __init__(self, freqtrade) -> None:\n \"\"\"\n Initializes all enabled rpc modules\n :param freqtrade: Instance of a freqtrade bot\n :return: None\n \"\"\"\n self._freqtrade = freqtrade\n self._config: Config = freqtrade.config\n if self._config.get('fiat_display_currency'):\n self._fiat_converter = CryptoToFiatConverter()\n\n @staticmethod\n def _rpc_show_config(config, botstate: Union[State, str],\n strategy_version: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"\n Return a dict of config options.\n Explicitly does NOT return the full config to avoid leakage of sensitive\n information via rpc.\n \"\"\"\n val = {\n 'version': __version__,\n 'strategy_version': strategy_version,\n 'dry_run': config['dry_run'],\n 'trading_mode': config.get('trading_mode', 'spot'),\n 'short_allowed': config.get('trading_mode', 'spot') != 'spot',\n 'stake_currency': config['stake_currency'],\n 'stake_currency_decimals': decimals_per_coin(config['stake_currency']),\n 'stake_amount': str(config['stake_amount']),\n 'available_capital': config.get('available_capital'),\n 'max_open_trades': (config['max_open_trades']\n if config['max_open_trades'] != float('inf') else -1),\n 'minimal_roi': config['minimal_roi'].copy() if 'minimal_roi' in config else {},\n 'stoploss': config.get('stoploss'),\n 'stoploss_on_exchange': config.get('order_types',\n {}).get('stoploss_on_exchange', False),\n 'trailing_stop': config.get('trailing_stop'),\n 'trailing_stop_positive': config.get('trailing_stop_positive'),\n 'trailing_stop_positive_offset': config.get('trailing_stop_positive_offset'),\n 'trailing_only_offset_is_reached': config.get('trailing_only_offset_is_reached'),\n 'unfilledtimeout': config.get('unfilledtimeout'),\n 'use_custom_stoploss': config.get('use_custom_stoploss'),\n 'order_types': config.get('order_types'),\n 'bot_name': config.get('bot_name', 'freqtrade'),\n 'timeframe': config.get('timeframe'),\n 'timeframe_ms': timeframe_to_msecs(config['timeframe']\n ) if 'timeframe' in config else 0,\n 'timeframe_min': timeframe_to_minutes(config['timeframe']\n ) if 'timeframe' in config else 0,\n 'exchange': config['exchange']['name'],\n 'strategy': config['strategy'],\n 'force_entry_enable': config.get('force_entry_enable', False),\n 'exit_pricing': config.get('exit_pricing', {}),\n 'entry_pricing': config.get('entry_pricing', {}),\n 'state': str(botstate),\n 'runmode': config['runmode'].value,\n 'position_adjustment_enable': config.get('position_adjustment_enable', False),\n 'max_entry_position_adjustment': (\n config.get('max_entry_position_adjustment', -1)\n if config.get('max_entry_position_adjustment') != float('inf')\n else -1)\n }\n 
return val\n\n def _rpc_trade_status(self, trade_ids: List[int] = []) -> List[Dict[str, Any]]:\n \"\"\"\n Below follows the RPC backend it is prefixed with rpc_ to raise awareness that it is\n a remotely exposed function\n \"\"\"\n # Fetch open trades\n if trade_ids:\n trades: Sequence[Trade] = Trade.get_trades(trade_filter=Trade.id.in_(trade_ids)).all()\n else:\n trades = Trade.get_open_trades()\n\n if not trades:\n raise RPCException('no active trade')\n else:\n results = []\n for trade in trades:\n current_profit_fiat: Optional[float] = None\n total_profit_fiat: Optional[float] = None\n\n # prepare open orders details\n oo_details: Optional[str] = \"\"\n oo_details_lst = [\n f'({oo.order_type} {oo.side} rem={oo.safe_remaining:.8f})'\n for oo in trade.open_orders\n if oo.ft_order_side not in ['stoploss']\n ]\n oo_details = ', '.join(oo_details_lst)\n\n total_profit_abs = 0.0\n total_profit_ratio: Optional[float] = None\n # calculate profit and send message to user\n if trade.is_open:\n try:\n current_rate = self._freqtrade.exchange.get_rate(\n trade.pair, side='exit', is_short=trade.is_short, refresh=False)\n except (ExchangeError, PricingError):\n current_rate = NAN\n if len(trade.select_filled_orders(trade.entry_side)) > 0:\n\n current_profit = current_profit_abs = current_profit_fiat = NAN\n if not isnan(current_rate):\n prof = trade.calculate_profit(current_rate)\n current_profit = prof.profit_ratio\n current_profit_abs = prof.profit_abs\n total_profit_abs = prof.total_profit\n total_profit_ratio = prof.total_profit_ratio\n else:\n current_profit = current_profit_abs = current_profit_fiat = 0.0\n\n else:\n # Closed trade ...\n current_rate = trade.close_rate\n current_profit = trade.close_profit or 0.0\n current_profit_abs = trade.close_profit_abs or 0.0\n\n # Calculate fiat profit\n if not isnan(current_profit_abs) and self._fiat_converter:\n current_profit_fiat = self._fiat_converter.convert_amount(\n current_profit_abs,\n self._freqtrade.config['stake_currency'],\n self._freqtrade.config['fiat_display_currency']\n )\n total_profit_fiat = self._fiat_converter.convert_amount(\n total_profit_abs,\n self._freqtrade.config['stake_currency'],\n self._freqtrade.config['fiat_display_currency']\n )\n\n # Calculate guaranteed profit (in case of trailing stop)\n stop_entry = trade.calculate_profit(trade.stop_loss)\n\n stoploss_entry_dist = stop_entry.profit_abs\n stoploss_entry_dist_ratio = stop_entry.profit_ratio\n\n # calculate distance to stoploss\n stoploss_current_dist = trade.stop_loss - current_rate\n stoploss_current_dist_ratio = stoploss_current_dist / current_rate\n\n trade_dict = trade.to_json()\n trade_dict.update(dict(\n close_profit=trade.close_profit if not trade.is_open else None,\n current_rate=current_rate,\n profit_ratio=current_profit,\n profit_pct=round(current_profit * 100, 2),\n profit_abs=current_profit_abs,\n profit_fiat=current_profit_fiat,\n total_profit_abs=total_profit_abs,\n total_profit_fiat=total_profit_fiat,\n total_profit_ratio=total_profit_ratio,\n stoploss_current_dist=stoploss_current_dist,\n stoploss_current_dist_ratio=round(stoploss_current_dist_ratio, 8),\n stoploss_current_dist_pct=round(stoploss_current_dist_ratio * 100, 2),\n stoploss_entry_dist=stoploss_entry_dist,\n stoploss_entry_dist_ratio=round(stoploss_entry_dist_ratio, 8),\n open_orders=oo_details\n ))\n results.append(trade_dict)\n return results\n\n def _rpc_status_table(self, stake_currency: str,\n fiat_display_currency: str) -> Tuple[List, List, float]:\n trades: List[Trade] = 
Trade.get_open_trades()\n nonspot = self._config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT\n if not trades:\n raise RPCException('no active trade')\n else:\n trades_list = []\n fiat_profit_sum = NAN\n for trade in trades:\n # calculate profit and send message to user\n try:\n current_rate = self._freqtrade.exchange.get_rate(\n trade.pair, side='exit', is_short=trade.is_short, refresh=False)\n except (PricingError, ExchangeError):\n current_rate = NAN\n trade_profit = NAN\n profit_str = f'{NAN:.2%}'\n else:\n if trade.nr_of_successful_entries > 0:\n profit = trade.calculate_profit(current_rate)\n trade_profit = profit.profit_abs\n profit_str = f'{profit.profit_ratio:.2%}'\n else:\n trade_profit = 0.0\n profit_str = f'{0.0:.2f}'\n direction_str = ('S' if trade.is_short else 'L') if nonspot else ''\n if self._fiat_converter:\n fiat_profit = self._fiat_converter.convert_amount(\n trade_profit,\n stake_currency,\n fiat_display_currency\n )\n if not isnan(fiat_profit):\n profit_str += f\" ({fiat_profit:.2f})\"\n fiat_profit_sum = fiat_profit if isnan(fiat_profit_sum) \\\n else fiat_profit_sum + fiat_profit\n\n active_attempt_side_symbols = [\n '*' if (oo and oo.ft_order_side == trade.entry_side) else '**'\n for oo in trade.open_orders\n ]\n\n # exemple: '*.**.**' trying to enter, exit and exit with 3 different orders\n active_attempt_side_symbols_str = '.'.join(active_attempt_side_symbols)\n\n detail_trade = [\n f'{trade.id} {direction_str}',\n trade.pair + active_attempt_side_symbols_str,\n shorten_date(dt_humanize(trade.open_date, only_distance=True)),\n profit_str\n ]\n\n if self._config.get('position_adjustment_enable', False):\n max_entry_str = ''\n if self._config.get('max_entry_position_adjustment', -1) > 0:\n max_entry_str = f\"/{self._config['max_entry_position_adjustment'] + 1}\"\n filled_entries = trade.nr_of_successful_entries\n detail_trade.append(f\"{filled_entries}{max_entry_str}\")\n trades_list.append(detail_trade)\n profitcol = \"Profit\"\n if self._fiat_converter:\n profitcol += \" (\" + fiat_display_currency + \")\"\n\n columns = [\n 'ID L/S' if nonspot else 'ID',\n 'Pair',\n 'Since',\n profitcol]\n if self._config.get('position_adjustment_enable', False):\n columns.append('# Entries')\n return trades_list, columns, fiat_profit_sum\n\n def _rpc_timeunit_profit(\n self, timescale: int,\n stake_currency: str, fiat_display_currency: str,\n timeunit: str = 'days') -> Dict[str, Any]:\n \"\"\"\n :param timeunit: Valid entries are 'days', 'weeks', 'months'\n \"\"\"\n start_date = datetime.now(timezone.utc).date()\n if timeunit == 'weeks':\n # weekly\n start_date = start_date - timedelta(days=start_date.weekday()) # Monday\n if timeunit == 'months':\n start_date = start_date.replace(day=1)\n\n def time_offset(step: int):\n if timeunit == 'months':\n return relativedelta(months=step)\n return timedelta(**{timeunit: step})\n\n if not (isinstance(timescale, int) and timescale > 0):\n raise RPCException('timescale must be an integer greater than 0')\n\n profit_units: Dict[date, Dict] = {}\n daily_stake = self._freqtrade.wallets.get_total_stake_amount()\n\n for day in range(0, timescale):\n profitday = start_date - time_offset(day)\n # Only query for necessary columns for performance reasons.\n trades = Trade.session.execute(\n select(Trade.close_profit_abs)\n .filter(Trade.is_open.is_(False),\n Trade.close_date >= profitday,\n Trade.close_date < (profitday + time_offset(1)))\n .order_by(Trade.close_date)\n ).all()\n\n curdayprofit = sum(\n trade.close_profit_abs for trade 
in trades if trade.close_profit_abs is not None)\n # Calculate this periods starting balance\n daily_stake = daily_stake - curdayprofit\n profit_units[profitday] = {\n 'amount': curdayprofit,\n 'daily_stake': daily_stake,\n 'rel_profit': round(curdayprofit / daily_stake, 8) if daily_stake > 0 else 0,\n 'trades': len(trades),\n }\n\n data = [\n {\n 'date': key,\n 'abs_profit': value[\"amount\"],\n 'starting_balance': value[\"daily_stake\"],\n 'rel_profit': value[\"rel_profit\"],\n 'fiat_value': self._fiat_converter.convert_amount(\n value['amount'],\n stake_currency,\n fiat_display_currency\n ) if self._fiat_converter else 0,\n 'trade_count': value[\"trades\"],\n }\n for key, value in profit_units.items()\n ]\n return {\n 'stake_currency': stake_currency,\n 'fiat_display_currency': fiat_display_currency,\n 'data': data\n }\n\n def _rpc_trade_history(self, limit: int, offset: int = 0, order_by_id: bool = False) -> Dict:\n \"\"\" Returns the X last trades \"\"\"\n order_by: Any = Trade.id if order_by_id else Trade.close_date.desc()\n if limit:\n trades = Trade.session.scalars(\n Trade.get_trades_query([Trade.is_open.is_(False)])\n .order_by(order_by)\n .limit(limit)\n .offset(offset))\n else:\n trades = Trade.session.scalars(\n Trade.get_trades_query([Trade.is_open.is_(False)])\n .order_by(Trade.close_date.desc()))\n\n output = [trade.to_json() for trade in trades]\n total_trades = Trade.session.scalar(\n select(func.count(Trade.id)).filter(Trade.is_open.is_(False)))\n\n return {\n \"trades\": output,\n \"trades_count\": len(output),\n \"offset\": offset,\n \"total_trades\": total_trades,\n }\n\n def _rpc_stats(self) -> Dict[str, Any]:\n \"\"\"\n Generate generic stats for trades in database\n \"\"\"\n def trade_win_loss(trade):\n if trade.close_profit > 0:\n return 'wins'\n elif trade.close_profit < 0:\n return 'losses'\n else:\n return 'draws'\n trades = Trade.get_trades([Trade.is_open.is_(False)], include_orders=False)\n # Duration\n dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}\n # Exit reason\n exit_reasons = {}\n for trade in trades:\n if trade.exit_reason not in exit_reasons:\n exit_reasons[trade.exit_reason] = {'wins': 0, 'losses': 0, 'draws': 0}\n exit_reasons[trade.exit_reason][trade_win_loss(trade)] += 1\n\n if trade.close_date is not None and trade.open_date is not None:\n trade_dur = (trade.close_date - trade.open_date).total_seconds()\n dur[trade_win_loss(trade)].append(trade_dur)\n\n wins_dur = sum(dur['wins']) / len(dur['wins']) if len(dur['wins']) > 0 else None\n draws_dur = sum(dur['draws']) / len(dur['draws']) if len(dur['draws']) > 0 else None\n losses_dur = sum(dur['losses']) / len(dur['losses']) if len(dur['losses']) > 0 else None\n\n durations = {'wins': wins_dur, 'draws': draws_dur, 'losses': losses_dur}\n return {'exit_reasons': exit_reasons, 'durations': durations}\n\n def _rpc_trade_statistics(\n self, stake_currency: str, fiat_display_currency: str,\n start_date: datetime = datetime.fromtimestamp(0)) -> Dict[str, Any]:\n \"\"\" Returns cumulative profit statistics \"\"\"\n trade_filter = ((Trade.is_open.is_(False) & (Trade.close_date >= start_date)) |\n Trade.is_open.is_(True))\n trades: Sequence[Trade] = Trade.session.scalars(Trade.get_trades_query(\n trade_filter, include_orders=False).order_by(Trade.id)).all()\n\n profit_all_coin = []\n profit_all_ratio = []\n profit_closed_coin = []\n profit_closed_ratio = []\n durations = []\n winning_trades = 0\n losing_trades = 0\n winning_profit = 0.0\n losing_profit = 0.0\n\n for trade in trades:\n 
current_rate: float = 0.0\n\n if trade.close_date:\n durations.append((trade.close_date - trade.open_date).total_seconds())\n\n if not trade.is_open:\n profit_ratio = trade.close_profit or 0.0\n profit_abs = trade.close_profit_abs or 0.0\n profit_closed_coin.append(profit_abs)\n profit_closed_ratio.append(profit_ratio)\n if profit_ratio >= 0:\n winning_trades += 1\n winning_profit += profit_abs\n else:\n losing_trades += 1\n losing_profit += profit_abs\n else:\n # Get current rate\n try:\n current_rate = self._freqtrade.exchange.get_rate(\n trade.pair, side='exit', is_short=trade.is_short, refresh=False)\n except (PricingError, ExchangeError):\n current_rate = NAN\n if isnan(current_rate):\n profit_ratio = NAN\n profit_abs = NAN\n else:\n profit = trade.calculate_profit(trade.close_rate or current_rate)\n\n profit_ratio = profit.profit_ratio\n profit_abs = profit.total_profit\n\n profit_all_coin.append(profit_abs)\n profit_all_ratio.append(profit_ratio)\n\n closed_trade_count = len([t for t in trades if not t.is_open])\n\n best_pair = Trade.get_best_pair(start_date)\n trading_volume = Trade.get_trading_volume(start_date)\n\n # Prepare data to display\n profit_closed_coin_sum = round(sum(profit_closed_coin), 8)\n profit_closed_ratio_mean = float(mean(profit_closed_ratio) if profit_closed_ratio else 0.0)\n profit_closed_ratio_sum = sum(profit_closed_ratio) if profit_closed_ratio else 0.0\n\n profit_closed_fiat = self._fiat_converter.convert_amount(\n profit_closed_coin_sum,\n stake_currency,\n fiat_display_currency\n ) if self._fiat_converter else 0\n\n profit_all_coin_sum = round(sum(profit_all_coin), 8)\n profit_all_ratio_mean = float(mean(profit_all_ratio) if profit_all_ratio else 0.0)\n # Doing the sum is not right - overall profit needs to be based on initial capital\n profit_all_ratio_sum = sum(profit_all_ratio) if profit_all_ratio else 0.0\n starting_balance = self._freqtrade.wallets.get_starting_balance()\n profit_closed_ratio_fromstart = 0\n profit_all_ratio_fromstart = 0\n if starting_balance:\n profit_closed_ratio_fromstart = profit_closed_coin_sum / starting_balance\n profit_all_ratio_fromstart = profit_all_coin_sum / starting_balance\n\n profit_factor = winning_profit / abs(losing_profit) if losing_profit else float('inf')\n\n winrate = (winning_trades / closed_trade_count) if closed_trade_count > 0 else 0\n\n trades_df = DataFrame([{'close_date': format_date(trade.close_date),\n 'close_date_dt': trade.close_date,\n 'profit_abs': trade.close_profit_abs}\n for trade in trades if not trade.is_open and trade.close_date])\n\n expectancy, expectancy_ratio = calculate_expectancy(trades_df)\n\n max_drawdown_abs = 0.0\n max_drawdown = 0.0\n drawdown_start: Optional[datetime] = None\n drawdown_end: Optional[datetime] = None\n dd_high_val = dd_low_val = 0.0\n if len(trades_df) > 0:\n try:\n (max_drawdown_abs, drawdown_start, drawdown_end, dd_high_val, dd_low_val,\n max_drawdown) = calculate_max_drawdown(\n trades_df, value_col='profit_abs', date_col='close_date_dt',\n starting_balance=starting_balance)\n except ValueError:\n # ValueError if no losing trade.\n pass\n\n profit_all_fiat = self._fiat_converter.convert_amount(\n profit_all_coin_sum,\n stake_currency,\n fiat_display_currency\n ) if self._fiat_converter else 0\n\n first_date = trades[0].open_date_utc if trades else None\n last_date = trades[-1].open_date_utc if trades else None\n num = float(len(durations) or 1)\n bot_start = KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME)\n return {\n 'profit_closed_coin': 
profit_closed_coin_sum,\n 'profit_closed_percent_mean': round(profit_closed_ratio_mean * 100, 2),\n 'profit_closed_ratio_mean': profit_closed_ratio_mean,\n 'profit_closed_percent_sum': round(profit_closed_ratio_sum * 100, 2),\n 'profit_closed_ratio_sum': profit_closed_ratio_sum,\n 'profit_closed_ratio': profit_closed_ratio_fromstart,\n 'profit_closed_percent': round(profit_closed_ratio_fromstart * 100, 2),\n 'profit_closed_fiat': profit_closed_fiat,\n 'profit_all_coin': profit_all_coin_sum,\n 'profit_all_percent_mean': round(profit_all_ratio_mean * 100, 2),\n 'profit_all_ratio_mean': profit_all_ratio_mean,\n 'profit_all_percent_sum': round(profit_all_ratio_sum * 100, 2),\n 'profit_all_ratio_sum': profit_all_ratio_sum,\n 'profit_all_ratio': profit_all_ratio_fromstart,\n 'profit_all_percent': round(profit_all_ratio_fromstart * 100, 2),\n 'profit_all_fiat': profit_all_fiat,\n 'trade_count': len(trades),\n 'closed_trade_count': closed_trade_count,\n 'first_trade_date': format_date(first_date),\n 'first_trade_humanized': dt_humanize(first_date) if first_date else '',\n 'first_trade_timestamp': dt_ts_def(first_date, 0),\n 'latest_trade_date': format_date(last_date),\n 'latest_trade_humanized': dt_humanize(last_date) if last_date else '',\n 'latest_trade_timestamp': dt_ts_def(last_date, 0),\n 'avg_duration': str(timedelta(seconds=sum(durations) / num)).split('.')[0],\n 'best_pair': best_pair[0] if best_pair else '',\n 'best_rate': round(best_pair[1] * 100, 2) if best_pair else 0, # Deprecated\n 'best_pair_profit_ratio': best_pair[1] if best_pair else 0,\n 'winning_trades': winning_trades,\n 'losing_trades': losing_trades,\n 'profit_factor': profit_factor,\n 'winrate': winrate,\n 'expectancy': expectancy,\n 'expectancy_ratio': expectancy_ratio,\n 'max_drawdown': max_drawdown,\n 'max_drawdown_abs': max_drawdown_abs,\n 'max_drawdown_start': format_date(drawdown_start),\n 'max_drawdown_start_timestamp': dt_ts_def(drawdown_start),\n 'max_drawdown_end': format_date(drawdown_end),\n 'max_drawdown_end_timestamp': dt_ts_def(drawdown_end),\n 'drawdown_high': dd_high_val,\n 'drawdown_low': dd_low_val,\n 'trading_volume': trading_volume,\n 'bot_start_timestamp': dt_ts_def(bot_start, 0),\n 'bot_start_date': format_date(bot_start),\n }\n\n def __balance_get_est_stake(\n self, coin: str, stake_currency: str, amount: float,\n balance: Wallet, tickers) -> Tuple[float, float]:\n est_stake = 0.0\n est_bot_stake = 0.0\n if coin == stake_currency:\n est_stake = balance.total\n if self._config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:\n # in Futures, \"total\" includes the locked stake, and therefore all positions\n est_stake = balance.free\n est_bot_stake = amount\n else:\n pair = self._freqtrade.exchange.get_valid_pair_combination(coin, stake_currency)\n rate: Optional[float] = tickers.get(pair, {}).get('last', None)\n if rate:\n if pair.startswith(stake_currency) and not pair.endswith(stake_currency):\n rate = 1.0 / rate\n est_stake = rate * balance.total\n est_bot_stake = rate * amount\n\n return est_stake, est_bot_stake\n\n def _rpc_balance(self, stake_currency: str, fiat_display_currency: str) -> Dict:\n \"\"\" Returns current account balance per crypto \"\"\"\n currencies: List[Dict] = []\n total = 0.0\n total_bot = 0.0\n try:\n tickers: Tickers = self._freqtrade.exchange.get_tickers(cached=True)\n except (ExchangeError):\n raise RPCException('Error getting current tickers.')\n\n open_trades: List[Trade] = Trade.get_open_trades()\n open_assets: Dict[str, Trade] = {t.safe_base_currency: t for t 
in open_trades}\n self._freqtrade.wallets.update(require_update=False)\n starting_capital = self._freqtrade.wallets.get_starting_balance()\n starting_cap_fiat = self._fiat_converter.convert_amount(\n starting_capital, stake_currency, fiat_display_currency) if self._fiat_converter else 0\n coin: str\n balance: Wallet\n for coin, balance in self._freqtrade.wallets.get_all_balances().items():\n if not balance.total:\n continue\n\n trade = open_assets.get(coin, None)\n is_bot_managed = coin == stake_currency or trade is not None\n trade_amount = trade.amount if trade else 0\n if coin == stake_currency:\n trade_amount = self._freqtrade.wallets.get_available_stake_amount()\n\n try:\n est_stake, est_stake_bot = self.__balance_get_est_stake(\n coin, stake_currency, trade_amount, balance, tickers)\n except ValueError:\n continue\n\n total += est_stake\n\n if is_bot_managed:\n total_bot += est_stake_bot\n currencies.append({\n 'currency': coin,\n 'free': balance.free,\n 'balance': balance.total,\n 'used': balance.used,\n 'bot_owned': trade_amount,\n 'est_stake': est_stake or 0,\n 'est_stake_bot': est_stake_bot if is_bot_managed else 0,\n 'stake': stake_currency,\n 'side': 'long',\n 'leverage': 1,\n 'position': 0,\n 'is_bot_managed': is_bot_managed,\n 'is_position': False,\n })\n symbol: str\n position: PositionWallet\n for symbol, position in self._freqtrade.wallets.get_all_positions().items():\n total += position.collateral\n total_bot += position.collateral\n\n currencies.append({\n 'currency': symbol,\n 'free': 0,\n 'balance': 0,\n 'used': 0,\n 'position': position.position,\n 'est_stake': position.collateral,\n 'est_stake_bot': position.collateral,\n 'stake': stake_currency,\n 'leverage': position.leverage,\n 'side': position.side,\n 'is_bot_managed': True,\n 'is_position': True\n })\n\n value = self._fiat_converter.convert_amount(\n total, stake_currency, fiat_display_currency) if self._fiat_converter else 0\n value_bot = self._fiat_converter.convert_amount(\n total_bot, stake_currency, fiat_display_currency) if self._fiat_converter else 0\n\n trade_count = len(Trade.get_trades_proxy())\n starting_capital_ratio = (total_bot / starting_capital) - 1 if starting_capital else 0.0\n starting_cap_fiat_ratio = (value_bot / starting_cap_fiat) - 1 if starting_cap_fiat else 0.0\n\n return {\n 'currencies': currencies,\n 'total': total,\n 'total_bot': total_bot,\n 'symbol': fiat_display_currency,\n 'value': value,\n 'value_bot': value_bot,\n 'stake': stake_currency,\n 'starting_capital': starting_capital,\n 'starting_capital_ratio': starting_capital_ratio,\n 'starting_capital_pct': round(starting_capital_ratio * 100, 2),\n 'starting_capital_fiat': starting_cap_fiat,\n 'starting_capital_fiat_ratio': starting_cap_fiat_ratio,\n 'starting_capital_fiat_pct': round(starting_cap_fiat_ratio * 100, 2),\n 'trade_count': trade_count,\n 'note': 'Simulated balances' if self._freqtrade.config['dry_run'] else ''\n }\n\n def _rpc_start(self) -> Dict[str, str]:\n \"\"\" Handler for start \"\"\"\n if self._freqtrade.state == State.RUNNING:\n return {'status': 'already running'}\n\n self._freqtrade.state = State.RUNNING\n return {'status': 'starting trader ...'}\n\n def _rpc_stop(self) -> Dict[str, str]:\n \"\"\" Handler for stop \"\"\"\n if self._freqtrade.state == State.RUNNING:\n self._freqtrade.state = State.STOPPED\n return {'status': 'stopping trader ...'}\n\n return {'status': 'already stopped'}\n\n def _rpc_reload_config(self) -> Dict[str, str]:\n \"\"\" Handler for reload_config. 
\"\"\"\n self._freqtrade.state = State.RELOAD_CONFIG\n return {'status': 'Reloading config ...'}\n\n def _rpc_stopentry(self) -> Dict[str, str]:\n \"\"\"\n Handler to stop buying, but handle open trades gracefully.\n \"\"\"\n if self._freqtrade.state == State.RUNNING:\n # Set 'max_open_trades' to 0\n self._freqtrade.config['max_open_trades'] = 0\n self._freqtrade.strategy.max_open_trades = 0\n\n return {'status': 'No more entries will occur from now. Run /reload_config to reset.'}\n\n def _rpc_reload_trade_from_exchange(self, trade_id: int) -> Dict[str, str]:\n \"\"\"\n Handler for reload_trade_from_exchange.\n Reloads a trade from it's orders, should manual interaction have happened.\n \"\"\"\n trade = Trade.get_trades(trade_filter=[Trade.id == trade_id]).first()\n if not trade:\n raise RPCException(f\"Could not find trade with id {trade_id}.\")\n\n self._freqtrade.handle_onexchange_order(trade)\n return {'status': 'Reloaded from orders from exchange'}\n\n def __exec_force_exit(self, trade: Trade, ordertype: Optional[str],\n amount: Optional[float] = None) -> bool:\n # Check if there is there are open orders\n trade_entry_cancelation_registry = []\n for oo in trade.open_orders:\n trade_entry_cancelation_res = {'order_id': oo.order_id, 'cancel_state': False}\n order = self._freqtrade.exchange.fetch_order(oo.order_id, trade.pair)\n\n if order['side'] == trade.entry_side:\n fully_canceled = self._freqtrade.handle_cancel_enter(\n trade, order, oo, CANCEL_REASON['FORCE_EXIT'])\n trade_entry_cancelation_res['cancel_state'] = fully_canceled\n trade_entry_cancelation_registry.append(trade_entry_cancelation_res)\n\n if order['side'] == trade.exit_side:\n # Cancel order - so it is placed anew with a fresh price.\n self._freqtrade.handle_cancel_exit(\n trade, order, oo, CANCEL_REASON['FORCE_EXIT'])\n\n if all(tocr['cancel_state'] is False for tocr in trade_entry_cancelation_registry):\n if trade.has_open_orders:\n # Order cancellation failed, so we can't exit.\n return False\n # Get current rate and execute sell\n current_rate = self._freqtrade.exchange.get_rate(\n trade.pair, side='exit', is_short=trade.is_short, refresh=True)\n exit_check = ExitCheckTuple(exit_type=ExitType.FORCE_EXIT)\n order_type = ordertype or self._freqtrade.strategy.order_types.get(\n \"force_exit\", self._freqtrade.strategy.order_types[\"exit\"])\n sub_amount: Optional[float] = None\n if amount and amount < trade.amount:\n # Partial exit ...\n min_exit_stake = self._freqtrade.exchange.get_min_pair_stake_amount(\n trade.pair, current_rate, trade.stop_loss_pct)\n remaining = (trade.amount - amount) * current_rate\n if remaining < min_exit_stake:\n raise RPCException(f'Remaining amount of {remaining} would be too small.')\n sub_amount = amount\n\n self._freqtrade.execute_trade_exit(\n trade, current_rate, exit_check, ordertype=order_type,\n sub_trade_amt=sub_amount)\n\n return True\n return False\n\n def _rpc_force_exit(self, trade_id: str, ordertype: Optional[str] = None, *,\n amount: Optional[float] = None) -> Dict[str, str]:\n \"\"\"\n Handler for forceexit <id>.\n Sells the given trade at current price\n \"\"\"\n\n if self._freqtrade.state != State.RUNNING:\n raise RPCException('trader is not running')\n\n with self._freqtrade._exit_lock:\n if trade_id == 'all':\n # Execute exit for all open orders\n for trade in Trade.get_open_trades():\n self.__exec_force_exit(trade, ordertype)\n Trade.commit()\n self._freqtrade.wallets.update()\n return {'result': 'Created exit orders for all open trades.'}\n\n # Query for trade\n trade 
= Trade.get_trades(\n trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True), ]\n ).first()\n if not trade:\n logger.warning('force_exit: Invalid argument received')\n raise RPCException('invalid argument')\n\n result = self.__exec_force_exit(trade, ordertype, amount)\n Trade.commit()\n self._freqtrade.wallets.update()\n if not result:\n raise RPCException('Failed to exit trade.')\n return {'result': f'Created exit order for trade {trade_id}.'}\n\n def _force_entry_validations(self, pair: str, order_side: SignalDirection):\n if not self._freqtrade.config.get('force_entry_enable', False):\n raise RPCException('Force_entry not enabled.')\n\n if self._freqtrade.state != State.RUNNING:\n raise RPCException('trader is not running')\n\n if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT:\n raise RPCException(\"Can't go short on Spot markets.\")\n\n if pair not in self._freqtrade.exchange.get_markets(tradable_only=True):\n raise RPCException('Symbol does not exist or market is not active.')\n # Check if pair quote currency equals to the stake currency.\n stake_currency = self._freqtrade.config.get('stake_currency')\n if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency:\n raise RPCException(\n f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.')\n\n def _rpc_force_entry(self, pair: str, price: Optional[float], *,\n order_type: Optional[str] = None,\n order_side: SignalDirection = SignalDirection.LONG,\n stake_amount: Optional[float] = None,\n enter_tag: Optional[str] = 'force_entry',\n leverage: Optional[float] = None) -> Optional[Trade]:\n \"\"\"\n Handler for forcebuy <asset> <price>\n Buys a pair trade at the given or current price\n \"\"\"\n self._force_entry_validations(pair, order_side)\n\n # check if valid pair\n\n # check if pair already has an open pair\n trade: Optional[Trade] = Trade.get_trades(\n [Trade.is_open.is_(True), Trade.pair == pair]).first()\n is_short = (order_side == SignalDirection.SHORT)\n if trade:\n is_short = trade.is_short\n if not self._freqtrade.strategy.position_adjustment_enable:\n raise RPCException(f\"position for {pair} already open - id: {trade.id}\")\n if trade.has_open_orders:\n raise RPCException(f\"position for {pair} already open - id: {trade.id} \"\n f\"and has open order {','.join(trade.open_orders_ids)}\")\n else:\n if Trade.get_open_trade_count() >= self._config['max_open_trades']:\n raise RPCException(\"Maximum number of trades is reached.\")\n\n if not stake_amount:\n # gen stake amount\n stake_amount = self._freqtrade.wallets.get_trade_stake_amount(pair)\n\n # execute buy\n if not order_type:\n order_type = self._freqtrade.strategy.order_types.get(\n 'force_entry', self._freqtrade.strategy.order_types['entry'])\n with self._freqtrade._exit_lock:\n if self._freqtrade.execute_entry(pair, stake_amount, price,\n ordertype=order_type, trade=trade,\n is_short=is_short,\n enter_tag=enter_tag,\n leverage_=leverage,\n ):\n Trade.commit()\n trade = Trade.get_trades([Trade.is_open.is_(True), Trade.pair == pair]).first()\n return trade\n else:\n raise RPCException(f'Failed to enter position for {pair}.')\n\n def _rpc_cancel_open_order(self, trade_id: int):\n if self._freqtrade.state != State.RUNNING:\n raise RPCException('trader is not running')\n with self._freqtrade._exit_lock:\n # Query for trade\n trade = Trade.get_trades(\n trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True), ]\n ).first()\n if not trade:\n logger.warning('cancel_open_order: 
Invalid trade_id received.')\n raise RPCException('Invalid trade_id.')\n if not trade.has_open_orders:\n logger.warning('cancel_open_order: No open order for trade_id.')\n raise RPCException('No open order for trade_id.')\n\n for open_order in trade.open_orders:\n try:\n order = self._freqtrade.exchange.fetch_order(open_order.order_id, trade.pair)\n except ExchangeError as e:\n logger.info(f\"Cannot query order for {trade} due to {e}.\", exc_info=True)\n raise RPCException(\"Order not found.\")\n self._freqtrade.handle_cancel_order(\n order, open_order, trade, CANCEL_REASON['USER_CANCEL'])\n Trade.commit()\n\n def _rpc_delete(self, trade_id: int) -> Dict[str, Union[str, int]]:\n \"\"\"\n Handler for delete <id>.\n Delete the given trade and close eventually existing open orders.\n \"\"\"\n with self._freqtrade._exit_lock:\n c_count = 0\n trade = Trade.get_trades(trade_filter=[Trade.id == trade_id]).first()\n if not trade:\n logger.warning('delete trade: Invalid argument received')\n raise RPCException('invalid argument')\n\n # Try cancelling regular order if that exists\n for open_order in trade.open_orders:\n try:\n self._freqtrade.exchange.cancel_order(open_order.order_id, trade.pair)\n c_count += 1\n except (ExchangeError):\n pass\n\n # cancel stoploss on exchange ...\n if (self._freqtrade.strategy.order_types.get('stoploss_on_exchange')\n and trade.stoploss_order_id):\n try:\n self._freqtrade.exchange.cancel_stoploss_order(trade.stoploss_order_id,\n trade.pair)\n c_count += 1\n except (ExchangeError):\n pass\n\n trade.delete()\n self._freqtrade.wallets.update()\n return {\n 'result': 'success',\n 'trade_id': trade_id,\n 'result_msg': f'Deleted trade {trade_id}. Closed {c_count} open orders.',\n 'cancel_order_count': c_count,\n }\n\n def _rpc_performance(self) -> List[Dict[str, Any]]:\n \"\"\"\n Handler for performance.\n Shows a performance statistic from finished trades\n \"\"\"\n pair_rates = Trade.get_overall_performance()\n\n return pair_rates\n\n def _rpc_enter_tag_performance(self, pair: Optional[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Handler for buy tag performance.\n Shows a performance statistic from finished trades\n \"\"\"\n return Trade.get_enter_tag_performance(pair)\n\n def _rpc_exit_reason_performance(self, pair: Optional[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Handler for exit reason performance.\n Shows a performance statistic from finished trades\n \"\"\"\n return Trade.get_exit_reason_performance(pair)\n\n def _rpc_mix_tag_performance(self, pair: Optional[str]) -> List[Dict[str, Any]]:\n \"\"\"\n Handler for mix tag (enter_tag + exit_reason) performance.\n Shows a performance statistic from finished trades\n \"\"\"\n mix_tags = Trade.get_mix_tag_performance(pair)\n\n return mix_tags\n\n def _rpc_count(self) -> Dict[str, float]:\n \"\"\" Returns the number of trades running \"\"\"\n if self._freqtrade.state != State.RUNNING:\n raise RPCException('trader is not running')\n\n trades = Trade.get_open_trades()\n return {\n 'current': len(trades),\n 'max': (int(self._freqtrade.config['max_open_trades'])\n if self._freqtrade.config['max_open_trades'] != float('inf') else -1),\n 'total_stake': sum((trade.open_rate * trade.amount) for trade in trades)\n }\n\n def _rpc_locks(self) -> Dict[str, Any]:\n \"\"\" Returns the current locks \"\"\"\n\n locks = PairLocks.get_pair_locks(None)\n return {\n 'lock_count': len(locks),\n 'locks': [lock.to_json() for lock in locks]\n }\n\n def _rpc_delete_lock(self, lockid: Optional[int] = None,\n pair: Optional[str] = None) -> Dict[str, 
Any]:\n \"\"\" Delete specific lock(s) \"\"\"\n locks: Sequence[PairLock] = []\n\n if pair:\n locks = PairLocks.get_pair_locks(pair)\n if lockid:\n locks = PairLock.session.scalars(select(PairLock).filter(PairLock.id == lockid)).all()\n\n for lock in locks:\n lock.active = False\n lock.lock_end_time = datetime.now(timezone.utc)\n\n Trade.commit()\n\n return self._rpc_locks()\n\n def _rpc_whitelist(self) -> Dict:\n \"\"\" Returns the currently active whitelist\"\"\"\n res = {'method': self._freqtrade.pairlists.name_list,\n 'length': len(self._freqtrade.active_pair_whitelist),\n 'whitelist': self._freqtrade.active_pair_whitelist\n }\n return res\n\n def _rpc_blacklist_delete(self, delete: List[str]) -> Dict:\n \"\"\" Removes pairs from currently active blacklist \"\"\"\n errors = {}\n for pair in delete:\n if pair in self._freqtrade.pairlists.blacklist:\n self._freqtrade.pairlists.blacklist.remove(pair)\n else:\n errors[pair] = {\n 'error_msg': f\"Pair {pair} is not in the current blacklist.\"\n }\n resp = self._rpc_blacklist()\n resp['errors'] = errors\n return resp\n\n def _rpc_blacklist(self, add: Optional[List[str]] = None) -> Dict:\n \"\"\" Returns the currently active blacklist\"\"\"\n errors = {}\n if add:\n for pair in add:\n if pair not in self._freqtrade.pairlists.blacklist:\n try:\n expand_pairlist([pair], self._freqtrade.exchange.get_markets().keys())\n self._freqtrade.pairlists.blacklist.append(pair)\n\n except ValueError:\n errors[pair] = {\n 'error_msg': f'Pair {pair} is not a valid wildcard.'}\n else:\n errors[pair] = {\n 'error_msg': f'Pair {pair} already in pairlist.'}\n\n res = {'method': self._freqtrade.pairlists.name_list,\n 'length': len(self._freqtrade.pairlists.blacklist),\n 'blacklist': self._freqtrade.pairlists.blacklist,\n 'blacklist_expanded': self._freqtrade.pairlists.expanded_blacklist,\n 'errors': errors,\n }\n return res\n\n @staticmethod\n def _rpc_get_logs(limit: Optional[int]) -> Dict[str, Any]:\n \"\"\"Returns the last X logs\"\"\"\n if limit:\n buffer = bufferHandler.buffer[-limit:]\n else:\n buffer = bufferHandler.buffer\n records = [[format_date(datetime.fromtimestamp(r.created)),\n r.created * 1000, r.name, r.levelname,\n r.message + ('\\n' + r.exc_text if r.exc_text else '')]\n for r in buffer]\n\n # Log format:\n # [logtime-formatted, logepoch, logger-name, loglevel, message \\n + exception]\n # e.g. 
[\"2020-08-27 11:35:01\", 1598520901097.9397,\n # \"freqtrade.worker\", \"INFO\", \"Starting worker develop\"]\n\n return {'log_count': len(records), 'logs': records}\n\n def _rpc_edge(self) -> List[Dict[str, Any]]:\n \"\"\" Returns information related to Edge \"\"\"\n if not self._freqtrade.edge:\n raise RPCException('Edge is not enabled.')\n return self._freqtrade.edge.accepted_pairs()\n\n @staticmethod\n def _convert_dataframe_to_dict(strategy: str, pair: str, timeframe: str, dataframe: DataFrame,\n last_analyzed: datetime) -> Dict[str, Any]:\n has_content = len(dataframe) != 0\n signals = {\n 'enter_long': 0,\n 'exit_long': 0,\n 'enter_short': 0,\n 'exit_short': 0,\n }\n if has_content:\n\n dataframe.loc[:, '__date_ts'] = dataframe.loc[:, 'date'].view(int64) // 1000 // 1000\n # Move signal close to separate column when signal for easy plotting\n for sig_type in signals.keys():\n if sig_type in dataframe.columns:\n mask = (dataframe[sig_type] == 1)\n signals[sig_type] = int(mask.sum())\n dataframe.loc[mask, f'_{sig_type}_signal_close'] = dataframe.loc[mask, 'close']\n\n # band-aid until this is fixed:\n # https://github.com/pandas-dev/pandas/issues/45836\n datetime_types = ['datetime', 'datetime64', 'datetime64[ns, UTC]']\n date_columns = dataframe.select_dtypes(include=datetime_types)\n for date_column in date_columns:\n # replace NaT with `None`\n dataframe[date_column] = dataframe[date_column].astype(object).replace({NaT: None})\n\n dataframe = dataframe.replace({inf: None, -inf: None, NAN: None})\n\n res = {\n 'pair': pair,\n 'timeframe': timeframe,\n 'timeframe_ms': timeframe_to_msecs(timeframe),\n 'strategy': strategy,\n 'columns': list(dataframe.columns),\n 'data': dataframe.values.tolist(),\n 'length': len(dataframe),\n 'buy_signals': signals['enter_long'], # Deprecated\n 'sell_signals': signals['exit_long'], # Deprecated\n 'enter_long_signals': signals['enter_long'],\n 'exit_long_signals': signals['exit_long'],\n 'enter_short_signals': signals['enter_short'],\n 'exit_short_signals': signals['exit_short'],\n 'last_analyzed': last_analyzed,\n 'last_analyzed_ts': int(last_analyzed.timestamp()),\n 'data_start': '',\n 'data_start_ts': 0,\n 'data_stop': '',\n 'data_stop_ts': 0,\n }\n if has_content:\n res.update({\n 'data_start': str(dataframe.iloc[0]['date']),\n 'data_start_ts': int(dataframe.iloc[0]['__date_ts']),\n 'data_stop': str(dataframe.iloc[-1]['date']),\n 'data_stop_ts': int(dataframe.iloc[-1]['__date_ts']),\n })\n return res\n\n def _rpc_analysed_dataframe(self, pair: str, timeframe: str,\n limit: Optional[int]) -> Dict[str, Any]:\n \"\"\" Analyzed dataframe in Dict form \"\"\"\n\n _data, last_analyzed = self.__rpc_analysed_dataframe_raw(pair, timeframe, limit)\n return RPC._convert_dataframe_to_dict(self._freqtrade.config['strategy'],\n pair, timeframe, _data, last_analyzed)\n\n def __rpc_analysed_dataframe_raw(\n self,\n pair: str,\n timeframe: str,\n limit: Optional[int]\n ) -> Tuple[DataFrame, datetime]:\n \"\"\"\n Get the dataframe and last analyze from the dataprovider\n\n :param pair: The pair to get\n :param timeframe: The timeframe of data to get\n :param limit: The amount of candles in the dataframe\n \"\"\"\n _data, last_analyzed = self._freqtrade.dataprovider.get_analyzed_dataframe(\n pair, timeframe)\n _data = _data.copy()\n\n if limit:\n _data = _data.iloc[-limit:]\n\n return _data, last_analyzed\n\n def _ws_all_analysed_dataframes(\n self,\n pairlist: List[str],\n limit: Optional[int]\n ) -> Generator[Dict[str, Any], None, None]:\n \"\"\"\n Get the analysed 
dataframes of each pair in the pairlist.\n If specified, only return the most recent `limit` candles for\n each dataframe.\n\n :param pairlist: A list of pairs to get\n :param limit: If an integer, limits the size of dataframe\n If a list of string date times, only returns those candles\n :returns: A generator of dictionaries with the key, dataframe, and last analyzed timestamp\n \"\"\"\n timeframe = self._freqtrade.config['timeframe']\n candle_type = self._freqtrade.config.get('candle_type_def', CandleType.SPOT)\n\n for pair in pairlist:\n dataframe, last_analyzed = self.__rpc_analysed_dataframe_raw(pair, timeframe, limit)\n\n yield {\n \"key\": (pair, timeframe, candle_type),\n \"df\": dataframe,\n \"la\": last_analyzed\n }\n\n def _ws_request_analyzed_df(\n self,\n limit: Optional[int] = None,\n pair: Optional[str] = None\n ):\n \"\"\" Historical Analyzed Dataframes for WebSocket \"\"\"\n pairlist = [pair] if pair else self._freqtrade.active_pair_whitelist\n\n return self._ws_all_analysed_dataframes(pairlist, limit)\n\n def _ws_request_whitelist(self):\n \"\"\" Whitelist data for WebSocket \"\"\"\n return self._freqtrade.active_pair_whitelist\n\n @staticmethod\n def _rpc_analysed_history_full(config: Config, pair: str, timeframe: str,\n exchange) -> Dict[str, Any]:\n timerange_parsed = TimeRange.parse_timerange(config.get('timerange'))\n\n from freqtrade.data.converter import trim_dataframe\n from freqtrade.data.dataprovider import DataProvider\n from freqtrade.resolvers.strategy_resolver import StrategyResolver\n\n strategy = StrategyResolver.load_strategy(config)\n startup_candles = strategy.startup_candle_count\n\n _data = load_data(\n datadir=config[\"datadir\"],\n pairs=[pair],\n timeframe=timeframe,\n timerange=timerange_parsed,\n data_format=config['dataformat_ohlcv'],\n candle_type=config.get('candle_type_def', CandleType.SPOT),\n startup_candles=startup_candles,\n )\n if pair not in _data:\n raise RPCException(\n f\"No data for {pair}, {timeframe} in {config.get('timerange')} found.\")\n\n strategy.dp = DataProvider(config, exchange=exchange, pairlists=None)\n strategy.ft_bot_start()\n\n df_analyzed = strategy.analyze_ticker(_data[pair], {'pair': pair})\n df_analyzed = trim_dataframe(df_analyzed, timerange_parsed, startup_candles=startup_candles)\n\n return RPC._convert_dataframe_to_dict(strategy.get_strategy_name(), pair, timeframe,\n df_analyzed.copy(), dt_now())\n\n def _rpc_plot_config(self) -> Dict[str, Any]:\n if (self._freqtrade.strategy.plot_config and\n 'subplots' not in self._freqtrade.strategy.plot_config):\n self._freqtrade.strategy.plot_config['subplots'] = {}\n return self._freqtrade.strategy.plot_config\n\n @staticmethod\n def _rpc_plot_config_with_strategy(config: Config) -> Dict[str, Any]:\n\n from freqtrade.resolvers.strategy_resolver import StrategyResolver\n strategy = StrategyResolver.load_strategy(config)\n\n if (strategy.plot_config and 'subplots' not in strategy.plot_config):\n strategy.plot_config['subplots'] = {}\n return strategy.plot_config\n\n @staticmethod\n def _rpc_sysinfo() -> Dict[str, Any]:\n return {\n \"cpu_pct\": psutil.cpu_percent(interval=1, percpu=True),\n \"ram_pct\": psutil.virtual_memory().percent\n }\n\n def health(self) -> Dict[str, Optional[Union[str, int]]]:\n last_p = self._freqtrade.last_process\n if last_p is None:\n return {\n \"last_process\": None,\n \"last_process_loc\": None,\n \"last_process_ts\": None,\n }\n\n return {\n \"last_process\": str(last_p),\n \"last_process_loc\": format_date(last_p.astimezone(tzlocal())),\n 
\"last_process_ts\": int(last_p.timestamp()),\n }\n\n def _update_market_direction(self, direction: MarketDirection) -> None:\n self._freqtrade.strategy.market_direction = direction\n\n def _get_market_direction(self) -> MarketDirection:\n return self._freqtrade.strategy.market_direction" }, { "identifier": "EXMS", "path": "tests/conftest.py", "snippet": "EXMS = 'freqtrade.exchange.exchange.Exchange'" }, { "identifier": "get_patched_freqtradebot", "path": "tests/conftest.py", "snippet": "def get_patched_freqtradebot(mocker, config) -> FreqtradeBot:\n \"\"\"\n This function patches _init_modules() to not call dependencies\n :param mocker: a Mocker object to apply patches\n :param config: Config to pass to the bot\n :return: FreqtradeBot\n \"\"\"\n patch_freqtradebot(mocker, config)\n return FreqtradeBot(config)" }, { "identifier": "log_has_re", "path": "tests/conftest.py", "snippet": "def log_has_re(line, logs):\n \"\"\"Check if line matches some caplog's message.\"\"\"\n return any(re.match(line, message) for message in logs.messages)" }, { "identifier": "patch_get_signal", "path": "tests/conftest.py", "snippet": "def patch_get_signal(\n freqtrade: FreqtradeBot,\n enter_long=True,\n exit_long=False,\n enter_short=False,\n exit_short=False,\n enter_tag: Optional[str] = None,\n exit_tag: Optional[str] = None,\n) -> None:\n \"\"\"\n :param mocker: mocker to patch IStrategy class\n :return: None\n \"\"\"\n # returns (Signal-direction, signaname)\n def patched_get_entry_signal(*args, **kwargs):\n direction = None\n if enter_long and not any([exit_long, enter_short]):\n direction = SignalDirection.LONG\n if enter_short and not any([exit_short, enter_long]):\n direction = SignalDirection.SHORT\n\n return direction, enter_tag\n\n freqtrade.strategy.get_entry_signal = patched_get_entry_signal\n\n def patched_get_exit_signal(pair, timeframe, dataframe, is_short):\n if is_short:\n return enter_short, exit_short, exit_tag\n else:\n return enter_long, exit_long, exit_tag\n\n # returns (enter, exit)\n freqtrade.strategy.get_exit_signal = patched_get_exit_signal\n\n freqtrade.exchange.refresh_latest_ohlcv = lambda p: None" } ]
from unittest.mock import MagicMock

import pytest
from sqlalchemy import select

from freqtrade.enums import ExitCheckTuple, ExitType, TradingMode
from freqtrade.persistence import Trade
from freqtrade.persistence.models import Order
from freqtrade.rpc.rpc import RPC
from tests.conftest import EXMS, get_patched_freqtradebot, log_has_re, patch_get_signal
19516
def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee,
                                                     limit_buy_order, mocker) -> None:
    """
    Tests the stoploss_on_exchange exit workflow:
    * the first trade is sold via its exchange stoploss
    * the 2nd trade is kept open
    * the 3rd trade is sold via sell-signal
    """
    default_conf['max_open_trades'] = 3
    default_conf['exchange']['name'] = 'binance'

    stoploss = {
        'id': 123,
        'info': {}
    }
    stoploss_order_open = {
        "id": "123",
        "timestamp": 1542707426845,
        "datetime": "2018-11-20T09:50:26.845Z",
        "lastTradeTimestamp": None,
        "symbol": "BTC/USDT",
        "type": "stop_loss_limit",
        "side": "sell",
        "price": 1.08801,
        "amount": 91.07468123,
        "cost": 0.0,
        "average": 0.0,
        "filled": 0.0,
        "remaining": 0.0,
        "status": "open",
        "fee": None,
        "trades": None
    }
    stoploss_order_closed = stoploss_order_open.copy()
    stoploss_order_closed['status'] = 'closed'
    stoploss_order_closed['filled'] = stoploss_order_closed['amount']

    # Sell first trade based on stoploss, keep 2nd and 3rd trade open
    stop_orders = [stoploss_order_closed, stoploss_order_open, stoploss_order_open]
    stoploss_order_mock = MagicMock(
        side_effect=stop_orders)
    # Sell 3rd trade (not called for the first trade)
    should_sell_mock = MagicMock(side_effect=[
        [],
        [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]]
    )
    cancel_order_mock = MagicMock()
    mocker.patch.multiple(
        EXMS,
        create_stoploss=stoploss,
        fetch_ticker=ticker,
        get_fee=fee,
        amount_to_precision=lambda s, x, y: y,
        price_to_precision=lambda s, x, y: y,
        fetch_stoploss_order=stoploss_order_mock,
        cancel_stoploss_order_with_result=cancel_order_mock,
    )

    mocker.patch.multiple(
        'freqtrade.freqtradebot.FreqtradeBot',
        create_stoploss_order=MagicMock(return_value=True),
        _notify_exit=MagicMock(),
    )
    mocker.patch("freqtrade.strategy.interface.IStrategy.should_exit", should_sell_mock)
    wallets_mock = mocker.patch("freqtrade.wallets.Wallets.update")
    mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=1000)
    mocker.patch("freqtrade.wallets.Wallets.check_exit_amount", return_value=True)
def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee,
                                                     limit_buy_order, mocker) -> None:
    """
    Tests the stoploss_on_exchange exit workflow:
    * the first trade is sold via its exchange stoploss
    * the 2nd trade is kept open
    * the 3rd trade is sold via sell-signal
    """
    default_conf['max_open_trades'] = 3
    default_conf['exchange']['name'] = 'binance'

    stoploss = {
        'id': 123,
        'info': {}
    }
    stoploss_order_open = {
        "id": "123",
        "timestamp": 1542707426845,
        "datetime": "2018-11-20T09:50:26.845Z",
        "lastTradeTimestamp": None,
        "symbol": "BTC/USDT",
        "type": "stop_loss_limit",
        "side": "sell",
        "price": 1.08801,
        "amount": 91.07468123,
        "cost": 0.0,
        "average": 0.0,
        "filled": 0.0,
        "remaining": 0.0,
        "status": "open",
        "fee": None,
        "trades": None
    }
    stoploss_order_closed = stoploss_order_open.copy()
    stoploss_order_closed['status'] = 'closed'
    stoploss_order_closed['filled'] = stoploss_order_closed['amount']

    # Sell first trade based on stoploss, keep 2nd and 3rd trade open
    stop_orders = [stoploss_order_closed, stoploss_order_open, stoploss_order_open]
    stoploss_order_mock = MagicMock(
        side_effect=stop_orders)
    # Sell 3rd trade (not called for the first trade)
    should_sell_mock = MagicMock(side_effect=[
        [],
        [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]]
    )
    cancel_order_mock = MagicMock()
    mocker.patch.multiple(
        EXMS,
        create_stoploss=stoploss,
        fetch_ticker=ticker,
        get_fee=fee,
        amount_to_precision=lambda s, x, y: y,
        price_to_precision=lambda s, x, y: y,
        fetch_stoploss_order=stoploss_order_mock,
        cancel_stoploss_order_with_result=cancel_order_mock,
    )

    mocker.patch.multiple(
        'freqtrade.freqtradebot.FreqtradeBot',
        create_stoploss_order=MagicMock(return_value=True),
        _notify_exit=MagicMock(),
    )
    mocker.patch("freqtrade.strategy.interface.IStrategy.should_exit", should_sell_mock)
    wallets_mock = mocker.patch("freqtrade.wallets.Wallets.update")
    mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=1000)
    mocker.patch("freqtrade.wallets.Wallets.check_exit_amount", return_value=True)
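For reference, a standalone sketch of the `MagicMock(side_effect=[...])` pattern the test above depends on: each successive call consumes the next list element, which is how the first stoploss fetch reports a closed order while later fetches report open ones. The order dicts here are reduced to the status field; names are illustrative only.

from unittest.mock import MagicMock

closed_order = {"id": "123", "status": "closed"}
open_order = {"id": "123", "status": "open"}

fetch_stoploss_order = MagicMock(side_effect=[closed_order, open_order, open_order])

assert fetch_stoploss_order("123", "ETH/BTC")["status"] == "closed"
assert fetch_stoploss_order("123", "ETH/BTC")["status"] == "open"
assert fetch_stoploss_order("123", "ETH/BTC")["status"] == "open"
# A fourth call would raise StopIteration: the side_effect list is exhausted.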
freqtrade = get_patched_freqtradebot(mocker, default_conf)
7
2023-10-21 10:02:05+00:00
24k
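A record like the one above supports next-line prediction: the model is shown the imports plus the cropped code and must produce the held-out next line, while the gold snippet index (7 here) marks which context snippet defines the symbol the gold line needs (`get_patched_freqtradebot`). Below is a hedged scoring sketch; the field names are assumed from the record layout, not taken from any published loader.

from typing import Any, Dict


def exact_match(record: Dict[str, Any], predicted_line: str) -> bool:
    # Whitespace-insensitive exact match of one predicted line.
    return predicted_line.strip() == record["next_line"].strip()


record = {
    "next_line": "freqtrade = get_patched_freqtradebot(mocker, default_conf)",
    "gold_snippet_index": 7,
}
assert exact_match(record, "freqtrade = get_patched_freqtradebot(mocker, default_conf)")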
yanzhh/HGERE
transformers/src/transformers/modeling_bert.py
[ { "identifier": "gelu", "path": "transformers/src/transformers/activations.py", "snippet": "def swish(x):\ndef _gelu_python(x):\ndef gelu_new(x):\ndef get_activation(activation_string):\nACT2FN = {\n \"relu\": F.relu,\n \"swish\": swish,\n \"gelu\": gelu,\n \"tanh\": F.tanh,\n \"gelu_new\": gelu_new,\n}" }, { "identifier": "BertConfig", "path": "transformers/src/transformers/configuration_bert.py", "snippet": "class BertConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.\n It is used to instantiate an BERT model according to the specified arguments, defining the model\n architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n\n Args:\n vocab_size (:obj:`int`, optional, defaults to 30522):\n Vocabulary size of the BERT model. Defines the different tokens that\n can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.\n hidden_size (:obj:`int`, optional, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n num_hidden_layers (:obj:`int`, optional, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (:obj:`int`, optional, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (:obj:`int`, optional, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (:obj:`str` or :obj:`function`, optional, defaults to \"gelu\"):\n The non-linear activation function (function or string) in the encoder and pooler.\n If string, \"gelu\", \"relu\", \"swish\" and \"gelu_new\" are supported.\n hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):\n The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.\n attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n max_position_embeddings (:obj:`int`, optional, defaults to 512):\n The maximum sequence length that this model might ever be used with.\n Typically set this to something large just in case (e.g., 512 or 1024 or 2048).\n type_vocab_size (:obj:`int`, optional, defaults to 2):\n The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.\n initializer_range (:obj:`float`, optional, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n\n Example::\n\n from transformers import BertModel, BertConfig\n\n # Initializing a BERT bert-base-uncased style configuration\n configuration = BertConfig()\n\n # Initializing a model from the bert-base-uncased style configuration\n model = BertModel(configuration)\n\n # Accessing the model configuration\n configuration = model.config\n\n Attributes:\n pretrained_config_archive_map (Dict[str, str]):\n A dictionary containing all the available pre-trained checkpoints.\n \"\"\"\n pretrained_config_archive_map = 
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP\n model_type = \"bert\"\n\n def __init__(\n self,\n vocab_size=30522,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n **kwargs\n ):\n super().__init__(**kwargs)\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps" }, { "identifier": "add_start_docstrings", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings_to_callable", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n class_name = \":class:`~transformers.{}`\".format(fn.__qualname__.split(\".\")[0])\n intro = \" The {} forward method, overrides the :func:`__call__` special method.\".format(class_name)\n note = r\"\"\"\n\n .. note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n \"\"\"\n fn.__doc__ = intro + note + \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "PreTrainedModel", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "class PreTrainedModel(nn.Module, ModuleUtilsMixin):\n r\"\"\" Base class for all models.\n\n :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models\n as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.\n\n Class attributes (overridden by derived classes):\n - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.\n - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.\n - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:\n\n - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,\n - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,\n - ``path``: a path (string) to the TensorFlow checkpoint.\n\n - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.\n \"\"\"\n config_class = None\n 
pretrained_model_archive_map = {}\n base_model_prefix = \"\"\n\n @property\n def dummy_inputs(self):\n \"\"\" Dummy inputs to do a forward pass in the network.\n\n Returns:\n torch.Tensor with dummy inputs\n \"\"\"\n return {\"input_ids\": torch.tensor(DUMMY_INPUTS)}\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__()\n if not isinstance(config, PretrainedConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. \"\n \"To create a model from a pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n )\n )\n # Save config in model\n self.config = config\n\n @property\n def base_model(self):\n return getattr(self, self.base_model_prefix, self)\n\n def get_input_embeddings(self):\n \"\"\"\n Returns the model's input embeddings.\n\n Returns:\n :obj:`nn.Module`:\n A torch module mapping vocabulary to hidden states.\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self)\n if base_model is not self:\n return base_model.get_input_embeddings()\n else:\n raise NotImplementedError\n\n def set_input_embeddings(self, value):\n \"\"\"\n Set model's input embeddings\n\n Args:\n value (:obj:`nn.Module`):\n A module mapping vocabulary to hidden states.\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self)\n if base_model is not self:\n base_model.set_input_embeddings(value)\n else:\n raise NotImplementedError\n\n def get_output_embeddings(self):\n \"\"\"\n Returns the model's output embeddings.\n\n Returns:\n :obj:`nn.Module`:\n A torch module mapping hidden states to vocabulary.\n \"\"\"\n return None # Overwrite for models with output embeddings\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n if output_embeddings is not None:\n if isinstance(output_embeddings, list):\n for x in output_embeddings:\n self._tie_or_clone_weights(x, self.get_input_embeddings())\n else:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n def _tie_or_clone_weights(self, output_embeddings, input_embeddings):\n \"\"\" Tie or clone module weights depending of weither we are using TorchScript or not\n \"\"\"\n if self.config.torchscript:\n output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())\n else:\n output_embeddings.weight = input_embeddings.weight\n\n if hasattr(output_embeddings, \"bias\") and output_embeddings.bias is not None:\n output_embeddings.bias.data = torch.nn.functional.pad(\n output_embeddings.bias.data,\n (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),\n \"constant\",\n 0,\n )\n if hasattr(output_embeddings, \"out_features\") and hasattr(input_embeddings, \"num_embeddings\"):\n output_embeddings.out_features = input_embeddings.num_embeddings\n\n def resize_token_embeddings(self, new_num_tokens=None):\n \"\"\" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.\n Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.\n\n Arguments:\n\n new_num_tokens: (`optional`) int:\n New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. 
Reducing the size will remove vectors from the end.\n If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.\n\n Return: ``torch.nn.Embeddings``\n Pointer to the input tokens Embeddings Module of the model\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed\n model_embeds = base_model._resize_token_embeddings(new_num_tokens)\n if new_num_tokens is None:\n return model_embeds\n\n # Update base model and current model config\n self.config.vocab_size = new_num_tokens\n base_model.vocab_size = new_num_tokens\n\n # Tie weights again if needed\n self.tie_weights()\n\n return model_embeds\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.get_input_embeddings()\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.set_input_embeddings(new_embeddings)\n return self.get_input_embeddings()\n\n def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):\n \"\"\" Build a resized Embedding Module from a provided token Embedding Module.\n Increasing the size will add newly initialized vectors at the end\n Reducing the size will remove vectors from the end\n\n Args:\n new_num_tokens: (`optional`) int\n New number of tokens in the embedding matrix.\n Increasing the size will add newly initialized vectors at the end\n Reducing the size will remove vectors from the end\n If not provided or None: return the provided token Embedding Module.\n Return: ``torch.nn.Embeddings``\n Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None\n \"\"\"\n if new_num_tokens is None:\n return old_embeddings\n\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n if old_num_tokens == new_num_tokens:\n return old_embeddings\n\n # Build new embeddings\n new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)\n new_embeddings.to(old_embeddings.weight.device)\n\n # initialize all new embeddings (in particular added tokens)\n self._init_weights(new_embeddings)\n\n # Copy word embeddings from the previous weights\n num_tokens_to_copy = min(old_num_tokens, new_num_tokens)\n new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]\n\n return new_embeddings\n\n def init_weights(self):\n \"\"\" Initialize and prunes weights if needed. \"\"\"\n # Initialize weights\n self.apply(self._init_weights)\n\n # Prune heads if needed\n if self.config.pruned_heads:\n self.prune_heads(self.config.pruned_heads)\n\n # Tie weights if needed\n self.tie_weights()\n\n def prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the base model.\n\n Arguments:\n\n heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).\n E.g. 
{1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n \"\"\"\n # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads\n for layer, heads in heads_to_prune.items():\n union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)\n self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON\n\n self.base_model._prune_heads(heads_to_prune)\n\n def save_pretrained(self, save_directory):\n \"\"\" Save a model and its configuration file to a directory, so that it\n can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.\n \"\"\"\n assert os.path.isdir(\n save_directory\n ), \"Saving path should be a directory where the model and configuration can be saved\"\n\n # Only save the model itself if we are using distributed training\n model_to_save = self.module if hasattr(self, \"module\") else self\n\n # Attach architecture to the config\n model_to_save.config.architectures = [model_to_save.__class__.__name__]\n\n # Save configuration file\n model_to_save.config.save_pretrained(save_directory)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(save_directory, WEIGHTS_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n logger.info(\"Model weights saved in {}\".format(output_model_file))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\"Instantiate a pretrained pytorch model from a pre-trained model configuration.\n\n The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with ``model.train()``\n\n The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.\n It is up to you to train those weights with a downstream fine-tuning task.\n\n The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.\n\n Parameters:\n pretrained_model_name_or_path: either:\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n - None if you are both providing the configuration and state dictionary (resp. 
with keyword arguments ``config`` and ``state_dict``)\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaining positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) one of:\n - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or\n - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`\n Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.\n - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exist.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete an incompletely received file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it has been loaded) and initiate the model. (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n # For example purposes. Not runnable.\n model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = BertModel.from_pretrained('./test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')\n model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n\n # Load config if we don't provide a configuration\n if not isinstance(config, PretrainedConfig):\n config_path = config if config is not None else pretrained_model_name_or_path\n config, model_kwargs = cls.config_class.from_pretrained(\n config_path,\n *model_args,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n **kwargs,\n )\n else:\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path is not None:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")):\n # Load from a TF 1.0 checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):\n # Load from a TF 2.0 checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):\n # Load from a PyTorch checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n raise EnvironmentError(\n \"Error no file named {} found in directory {} or `from_tf` set to False\".format(\n [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + \".index\"], pretrained_model_name_or_path\n )\n )\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n archive_file = pretrained_model_name_or_path\n elif os.path.isfile(pretrained_model_name_or_path + \".index\"):\n assert (\n from_tf\n ), \"We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint\".format(\n pretrained_model_name_or_path + \".index\"\n )\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = hf_bucket_url(\n pretrained_model_name_or_path, postfix=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME)\n )\n\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(\n archive_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n )\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n msg = \"Couldn't reach 
server at '{}' to download pretrained weights.\".format(archive_file)\n else:\n msg = (\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url to model weight files named one of {} but \"\n \"couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path,\n \", \".join(cls.pretrained_model_archive_map.keys()),\n archive_file,\n [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],\n )\n )\n raise EnvironmentError(msg)\n\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n else:\n resolved_archive_file = None\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n try:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n except Exception:\n raise OSError(\n \"Unable to load weights from pytorch checkpoint file. \"\n \"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. \"\n )\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n\n if from_tf:\n if resolved_archive_file.endswith(\".index\"):\n # Load from a TensorFlow 1.X checkpoint - provided by original authors\n model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n else:\n # Load from our TensorFlow 2.0 checkpoints\n try:\n from transformers import load_tf2_checkpoint_in_pytorch_model\n\n model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. 
Please see \"\n \"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n else:\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants\n # so we need to apply the function recursively.\n def load(module: nn.Module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys\n )\n )\n if len(unexpected_keys) > 0:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys\n )\n )\n if len(error_msgs) > 0:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)\n )\n )\n\n model.tie_weights() # make sure word embedding weights are still tied if needed\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\"missing_keys\": missing_keys, \"unexpected_keys\": unexpected_keys, \"error_msgs\": error_msgs}\n return model, loading_info\n\n return model\n\n def prepare_inputs_for_generation(self, input_ids, **kwargs):\n return {\"input_ids\": input_ids}\n\n def _do_output_past(self, outputs):\n has_output_past = hasattr(self.config, \"output_past\") and self.config.output_past\n has_mem_len = hasattr(self.config, \"mem_len\") and self.config.mem_len\n\n if has_output_past and not has_mem_len and len(outputs) > 1:\n return True\n elif has_mem_len and self.config.mem_len > 0 and len(outputs) > 1:\n return True\n\n return False\n\n @torch.no_grad()\n def generate(\n self,\n input_ids=None,\n max_length=None,\n do_sample=True,\n num_beams=None,\n temperature=None,\n top_k=None,\n top_p=None,\n repetition_penalty=None,\n bos_token_id=None,\n pad_token_id=None,\n eos_token_ids=None,\n length_penalty=None,\n num_return_sequences=None,\n ):\n r\"\"\" Generates sequences for models with a LM head. 
The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling\n and beam-search.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between 1 and infinity. Default to 20.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `True`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictely positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n bos_token_id: (`optional`) int\n Beginning of sentence token if no prompt is provided. Default to 0.\n\n eos_token_ids: (`optional`) int or list of int\n End of sequence token or list of tokens to stop the generation. Default to 0.\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. 
Default to 1.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id, do_sample=False) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, bos_token_id=tokenizer.bos_token_id, pad_token_id=tokenizer.pad_token_id, eos_token_ids=tokenizer.eos_token_id, num_return_sequences=3) # generate 3 sequences by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n \"\"\"\n\n # We cannot generate if the model does not have an LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have an LM head. \"\n \"Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`)\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overridden by the input batch_size\n else:\n batch_size = 1\n if isinstance(eos_token_ids, int):\n eos_token_ids = [eos_token_ids]\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a positive integer.\"\n assert (eos_token_ids is None) or (\n isinstance(eos_token_ids, (list, tuple)) and all(isinstance(e, int) and e >= 0 for e in eos_token_ids)\n ), \"`eos_token_ids` should be a positive integer or a list/tuple of positive integers.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n if pad_token_id is None and eos_token_ids is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_ids[0])\n )\n pad_token_id = eos_token_ids[0]\n\n # current position and vocab size\n cur_len = input_ids.shape[1]\n vocab_size = 
self.config.vocab_size\n\n if num_return_sequences != 1:\n # Expand input to num return sequences\n input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len)\n input_ids = input_ids.contiguous().view(\n batch_size * num_return_sequences, cur_len\n ) # (batch_size * num_return_sequences, cur_len)\n effective_batch_size = batch_size * num_return_sequences\n else:\n effective_batch_size = batch_size\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n effective_batch_size,\n length_penalty,\n num_beams,\n vocab_size,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n effective_batch_size,\n )\n\n return output\n\n def _generate_no_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n batch_size,\n ):\n \"\"\" Generate sequences for each example without beam search (num_beams == 1).\n All returned sequences are generated independently.\n \"\"\"\n # current position / max lengths / length of generated sentences / unfinished sentences\n unfinished_sents = input_ids.new(batch_size).fill_(1)\n sent_lengths = input_ids.new(batch_size).fill_(max_length)\n\n past = None\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)\n outputs = self(**model_inputs)\n next_token_logits = outputs[0][:, -1, :]\n\n # if model has past, then set the past variable to speed up decoding\n if self._do_output_past(outputs):\n past = outputs[1]\n\n # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)\n if repetition_penalty != 1.0:\n for i in range(batch_size):\n for previous_token in set(input_ids[i].tolist()):\n # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability\n if next_token_logits[i, previous_token] < 0:\n next_token_logits[i, previous_token] *= repetition_penalty\n else:\n next_token_logits[i, previous_token] /= repetition_penalty\n\n if do_sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n if temperature != 1.0:\n next_token_logits = next_token_logits / temperature\n # Top-p/top-k filtering\n next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n # Sample\n next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)\n else:\n # Greedy decoding\n next_token = torch.argmax(next_token_logits, dim=-1)\n\n # update generations and finished sentences\n if eos_token_ids is not None:\n # pad finished sentences if eos_token_ids exist\n tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)\n else:\n tokens_to_add = next_token\n\n input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)\n\n if eos_token_ids is not None:\n for eos_token_id in eos_token_ids:\n eos_in_sents = tokens_to_add == eos_token_id\n # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length\n is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()\n sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1)\n # unfinished_sents is set to zero if eos in 
sentence\n unfinished_sents.mul_((~eos_in_sents).long())\n\n cur_len = cur_len + 1\n\n # stop when there is a </s> in each sentence, or if we exceed the maximum length\n if unfinished_sents.max() == 0:\n break\n\n # if there are different sentence lengths in the batch, some batches have to be padded\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`pad_token_id` has to be defined if batches have different lengths\"\n # finished sents are filled with pad_token\n decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id)\n else:\n decoded = input_ids\n\n for hypo_idx, hypo in enumerate(input_ids):\n decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]\n\n return decoded\n\n def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n do_sample,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n pad_token_id,\n eos_token_ids,\n batch_size,\n length_penalty,\n num_beams,\n vocab_size,\n ):\n \"\"\" Generate sequences for each example with beam search.\n \"\"\"\n # Expand input to num beams\n input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)\n input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len) # (batch_size * num_beams, cur_len)\n\n # generated hypotheses\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # cache compute states\n past = None\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n while cur_len < max_length:\n model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)\n outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)\n scores = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)\n\n # if model has past, then set the past variable to speed up decoding\n if self._do_output_past(outputs):\n past = outputs[1]\n\n # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)\n if repetition_penalty != 1.0:\n for i in range(batch_size * num_beams):\n for previous_token in set(input_ids[i].tolist()):\n # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability\n if scores[i, previous_token] < 0:\n scores[i, previous_token] *= repetition_penalty\n else:\n scores[i, previous_token] /= repetition_penalty\n\n if do_sample:\n # Temperature (higher temperature => more likely to sample low probability tokens)\n if temperature != 1.0:\n scores = scores / temperature\n # Top-p/top-k filtering\n scores = top_k_top_p_filtering(\n scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # Sample 2 next words for each beam (so we have some spare tokens and match output of greedy beam search)\n next_words = torch.multinomial(F.softmax(scores, dim=-1), num_samples=2) # (batch_size * num_beams, 2)\n # Compute next scores\n _scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)\n _scores = torch.gather(_scores, -1, next_words) # (batch_size * num_beams, 2)\n next_scores = _scores + beam_scores[:, None].expand_as(_scores) # (batch_size * num_beams, 2)\n # Match shape of greedy beam search\n next_words = next_words.view(batch_size, 2 * num_beams) # 
(batch_size, 2 * num_beams)\n next_scores = next_scores.view(batch_size, 2 * num_beams) # (batch_size, 2 * num_beams)\n else:\n # do greedy beam search\n scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)\n assert scores.size() == (batch_size * num_beams, vocab_size)\n # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together (we are keeping top hypotheses across beams)\n _scores = _scores.view(batch_size, num_beams * vocab_size) # (batch_size, num_beams * vocab_size)\n next_scores, next_words = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_words.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item()\n )\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_ids is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content\n next_sent_beam = []\n\n # next words for this sentence\n for idx, score in zip(next_words[batch_idx], next_scores[batch_idx]):\n\n # get beam and word IDs\n beam_id = idx // vocab_size\n word_id = idx % vocab_size\n\n # add to generated hypotheses if end of sentence or last iteration\n if eos_token_ids is not None and word_id.item() in eos_token_ids:\n generated_hyps[batch_idx].add(\n input_ids[batch_idx * num_beams + beam_id, :cur_len].clone(), score.item()\n )\n else:\n # add next predicted word if it is not eos_token\n next_sent_beam.append((score, word_id, batch_idx * num_beams + beam_id))\n\n # the beam for next step is full\n if len(next_sent_beam) == num_beams:\n break\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx + 1)\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_words = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)\n\n # re-order internal states\n if past:\n reordered_past = []\n for layer_past in past:\n # get the correct batch idx from layer past batch dim\n # batch dim of `past` and `mems` is at 2nd position\n reordered_layer_past = [layer_past[:, i].unsqueeze(1).clone().detach() for i in beam_idx]\n reordered_layer_past = torch.cat(reordered_layer_past, dim=1)\n # check that shape matches\n assert reordered_layer_past.shape == layer_past.shape\n reordered_past.append(reordered_layer_past)\n past = tuple(reordered_past)\n\n # update current 
length\n cur_len = cur_len + 1\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n for batch_idx in range(batch_size):\n # Add all open beam hypotheses to generated_hyps\n if not done[batch_idx]:\n for idx, score in zip(next_words[batch_idx], next_scores[batch_idx]):\n\n # get beam and word IDs\n beam_id = idx // vocab_size\n word_id = idx % vocab_size\n generated_hyps[batch_idx].add(\n input_ids[batch_idx * num_beams + beam_id, :cur_len].clone(), score.item()\n )\n\n # select the best hypotheses\n sent_lengths = input_ids.new(batch_size)\n best = []\n\n for i, hypotheses in enumerate(generated_hyps):\n best_hyp = max(hypotheses.beams, key=lambda x: x[0])[1]\n sent_lengths[i] = len(best_hyp)\n best.append(best_hyp)\n\n # shorter batches are filled with pad_token\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`pad_token_id` has to be defined\"\n sent_max_len = min(sent_lengths.max().item() + 1, max_length)\n decoded = input_ids.new(batch_size, sent_max_len).fill_(pad_token_id)\n\n # fill with hypothesis and eos_token_id if necessary\n for i, hypo in enumerate(best):\n decoded[i, : sent_lengths[i]] = hypo\n if sent_lengths[i] < max_length:\n decoded[i, sent_lengths[i]] = eos_token_ids[0]\n else:\n # none of the hypotheses have an eos_token\n assert all(len(hypo) == max_length for hypo in best)\n decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)\n\n return decoded" }, { "identifier": "prune_linear_layer", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "def prune_linear_layer(layer, index, dim=0):\n \"\"\" Prune a linear layer (a model parameter) to keep only entries in index.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if layer.bias is not None:\n if dim == 1:\n b = layer.bias.clone().detach()\n else:\n b = layer.bias[index].clone().detach()\n new_size = list(layer.weight.size())\n new_size[dim] = len(index)\n new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)\n new_layer.weight.requires_grad = False\n new_layer.weight.copy_(W.contiguous())\n new_layer.weight.requires_grad = True\n if layer.bias is not None:\n new_layer.bias.requires_grad = False\n new_layer.bias.copy_(b.contiguous())\n new_layer.bias.requires_grad = True\n return new_layer" } ]
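Both decoding loops in the snippet above call `top_k_top_p_filtering`, which is not part of this excerpt. The sketch below illustrates what that helper has to do -- standard top-k / nucleus filtering over a `(batch, vocab_size)` logits tensor; the signature mirrors the calls above, but this is an illustration under those assumptions, not the library's exact implementation:

import torch
import torch.nn.functional as F

def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("inf"), min_tokens_to_keep=1):
    """Mask logits so sampling only sees the top-k and/or nucleus (top-p) candidates."""
    if top_k > 0:
        top_k = max(top_k, min_tokens_to_keep)
        # Drop every logit strictly smaller than the k-th largest one per row
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits = logits.masked_fill(indices_to_remove, filter_value)
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Mark tokens whose cumulative probability exceeds the nucleus threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            sorted_indices_to_remove[..., : min_tokens_to_keep - 1] = False
        # Shift right so the first token that crosses the threshold is also kept
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = False
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        logits = logits.masked_fill(indices_to_remove, filter_value)
    return logits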
import logging
import math
import os

import torch
import torch.nn.functional as F
import pdb
import re
import numpy as np
import tensorflow as tf

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
from torch.nn.utils.rnn import pad_sequence

from .modules import *
from .activations import gelu, gelu_new, swish
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, prune_linear_layer
16,669
self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model. """ # from .modules import BiaffineSpanRepr, BiaffineRelationCls, BiafEncoder, \ # CatEncoder, max_pool, Tetrafine, BiaffineMessagePasser, \ # LinearMessegePasser, CPDTrilinear, CatEncoderCross, \ # bilinear_classifier, BiafCrossEncoder logger = logging.getLogger(__name__) BERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin", "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin", "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin", "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin", "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin", "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin", "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin", "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin", "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin", "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin", "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin", "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin", "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin", "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin", "bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-pytorch_model.bin", "bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-pytorch_model.bin", "bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-pytorch_model.bin", "bert-base-japanese-char-whole-word-masking": 
"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-pytorch_model.bin", "bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/pytorch_model.bin", "bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/pytorch_model.bin", "bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/pytorch_model.bin", } def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """ Load tf checkpoints in a pytorch model. """ try: except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert pointer.shape == array.shape except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def mish(x): return x * torch.tanh(nn.functional.softplus(x)) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new, "mish": mish} BertLayerNorm = torch.nn.LayerNorm class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states) mixed_value_layer = self.value(encoder_hidden_states) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = 
self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = BertAttention(config) self.is_decoder = config.is_decoder if self.is_decoder: self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) outputs = (layer_output,) + outputs return outputs class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) try: self.use_full_layer = config.use_full_layer except: self.use_full_layer = -1 def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, full_attention_mask=None, ): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if i==self.use_full_layer: attention_mask = full_attention_mask if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
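        # (the first token is the [CLS] token; its hidden state is used by
        # downstream heads as a fixed-size summary of the whole sequence)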
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. 
Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """
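As a concrete illustration of the input contract described by the docstring above, here is a minimal hand-built batch (the token ids are hypothetical placeholders; in practice a tokenizer such as `BertTokenizer` produces them, and `BertModel` is presumably defined later in this file):

import torch

# Two right-padded sequences; ids are made up for illustration, 0 = padding
input_ids = torch.tensor(
    [[101, 7592, 2088, 102, 0, 0],
     [101, 2054, 2003, 2026, 2171, 102]]
)
attention_mask = (input_ids != 0).long()      # 1 = attend to this token, 0 = padding
token_type_ids = torch.zeros_like(input_ids)  # single segment -> all "sentence A"

# model = BertModel(config)
# sequence_output, pooled_output = model(
#     input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids
# )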
@add_start_docstrings(
2
2023-10-15 02:31:09+00:00
24k
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/urllib3/connectionpool.py
[ { "identifier": "_TYPE_BODY", "path": "backend/venv/lib/python3.10/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "HTTPHeaderDict", "path": "backend/venv/lib/python3.10/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered in Python 3.7+\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead 
append the value to\n any existing header value with a comma. If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "RequestMethods", "path": "backend/venv/lib/python3.10/site-packages/urllib3/_request_methods.py", "snippet": "class RequestMethods:\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, 
such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None:\n self.headers = headers or {}\n\n def urlopen(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **kw: typing.Any,\n ) -> BaseHTTPResponse: # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n json: typing.Any | None = None,\n **urlopen_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if json is not None and body is not None:\n raise TypeError(\n \"request got values for both 'body' and 'json' parameters which are mutually exclusive\"\n )\n\n if json is not None:\n if headers is None:\n headers = self.headers.copy() # type: ignore\n if not (\"content-type\" in map(str.lower, headers.keys())):\n headers[\"Content-Type\"] = \"application/json\" # type: ignore\n\n body = _json.dumps(json, separators=(\",\", \":\"), ensure_ascii=False).encode(\n \"utf-8\"\n )\n\n if body is not None:\n urlopen_kw[\"body\"] = body\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method,\n url,\n fields=fields, # type: ignore[arg-type]\n headers=headers,\n **urlopen_kw,\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(\n self,\n method: str,\n url: str,\n fields: _TYPE_ENCODE_URL_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. 
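As an illustrative aside, the dispatch just described means a single ``request()`` call covers both encoding styles; this sketch assumes a reachable ``httpbin.org`` (any echo endpoint would do):

.. code-block:: python

    import urllib3

    http = urllib3.PoolManager()

    # GET is in _encode_url_methods, so fields become the query string.
    r1 = http.request("GET", "https://httpbin.org/get", fields={"q": "demo"})

    # POST is not, so the same fields are encoded into the request body.
    r2 = http.request("POST", "https://httpbin.org/post", fields={"q": "demo"})

    # json= serializes the body and sets Content-Type: application/json.
    r3 = http.request("POST", "https://httpbin.org/post", json={"q": "demo"})

    print(r1.status, r2.status, r3.status)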
This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method: str,\n url: str,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. 
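A hedged sketch of the field formats listed above; the file names and contents are invented for illustration:

.. code-block:: python

    import urllib3

    http = urllib3.PoolManager()
    fields = {
        "comment": "plain key/value string",
        # (filename, data) tuple; the MIME type is guessed from the filename
        "upload": ("report.txt", b"file contents"),
        # (filename, data, MIME type) tuple with an explicit type
        "image": ("pixel.png", b"\x89PNG...", "image/png"),
    }
    # encode_multipart=True (the default) builds a multipart/form-data body.
    r = http.request("POST", "https://httpbin.org/post", fields=fields)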
The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": HTTPHeaderDict(headers)}\n body: bytes | str\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields), # type: ignore[arg-type]\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"].setdefault(\"Content-Type\", content_type)\n\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "BaseSSLError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: str, *values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n 
ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "port_by_scheme", "path": "backend/venv/lib/python3.10/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: str, *values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | 
None = None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "ClosedPoolError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class ClosedPoolError(PoolError):\n \"\"\"Raised when a request enters a pool after the pool has been closed.\"\"\"" }, { "identifier": "EmptyPoolError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class EmptyPoolError(PoolError):\n \"\"\"Raised when a pool runs out of connections and no more are allowed.\"\"\"" }, { "identifier": "FullPoolError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class FullPoolError(PoolError):\n \"\"\"Raised when we try to add a connection to a full pool in blocking mode.\"\"\"" }, { "identifier": "HostChangedError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class HostChangedError(RequestError):\n \"\"\"Raised when an existing pool gets a request for a foreign host.\"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, retries: Retry | int = 3\n ) -> None:\n message = 
f\"Tried to open a foreign host with url: {url}\"\n super().__init__(pool, url, message)\n self.retries = retries" }, { "identifier": "InsecureRequestWarning", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class InsecureRequestWarning(SecurityWarning):\n \"\"\"Warned when making an unverified HTTPS request.\"\"\"" }, { "identifier": "LocationValueError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"" }, { "identifier": "MaxRetryError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param str url: The requested Url\n :param reason: The underlying error\n :type reason: :class:`Exception`\n\n \"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, reason: Exception | None = None\n ) -> None:\n self.reason = reason\n\n message = f\"Max retries exceeded with url: {url} (Caused by {reason!r})\"\n\n super().__init__(pool, url, message)" }, { "identifier": "NewConnectionError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. 
Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "ProtocolError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class ProtocolError(HTTPError):\n \"\"\"Raised when something unexpected happens mid-request/response.\"\"\"" }, { "identifier": "ProxyError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n # The original error is also available as __cause__.\n original_error: Exception\n\n def __init__(self, message: str, error: Exception) -> None:\n super().__init__(message, error)\n self.original_error = error" }, { "identifier": "ReadTimeoutError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class ReadTimeoutError(TimeoutError, RequestError):\n \"\"\"Raised when a socket timeout occurs while receiving data from a server\"\"\"" }, { "identifier": "SSLError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class SSLError(HTTPError):\n \"\"\"Raised when SSL certificate fails in an HTTPS connection.\"\"\"" }, { "identifier": "TimeoutError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/exceptions.py", "snippet": "class TimeoutError(HTTPError):\n \"\"\"Raised when a socket timeout error occurs.\n\n Catching this error will catch both :exc:`ReadTimeoutErrors\n <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.\n \"\"\"" }, { "identifier": "BaseHTTPResponse", "path": "backend/venv/lib/python3.10/site-packages/urllib3/response.py", "snippet": "class BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: tuple[type[Exception], ...] = (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,\n status: int,\n version: int,\n reason: str | None,\n decode_content: bool,\n request_url: str | None,\n retries: Retry | None = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self._has_decoded_content = False\n self._request_url: str | None = request_url\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: ContentDecoder | None = None\n\n def get_redirect_location(self) -> str | None | Literal[False]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. 
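An illustrative handling pattern for the exception types defined above (the URL is a deliberately unresolvable placeholder); note that ``TimeoutError`` here is urllib3's own class, which covers both connect and read timeouts:

.. code-block:: python

    import urllib3
    from urllib3.exceptions import MaxRetryError, TimeoutError

    http = urllib3.PoolManager(retries=urllib3.util.Retry(total=2))
    try:
        resp = http.request("GET", "https://example.invalid/")
    except TimeoutError:
        print("connect or read timed out")
    except MaxRetryError as e:
        # Retries exhausted; the underlying cause (e.g. a NewConnectionError)
        # is carried on e.reason.
        print("gave up:", e.reason)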
``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> typing.Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> str | None:\n raise NotImplementedError()\n\n @url.setter\n def url(self, url: str | None) -> None:\n raise NotImplementedError()\n\n @property\n def connection(self) -> HTTPConnection | None:\n raise NotImplementedError()\n\n @property\n def retries(self) -> Retry | None:\n return self._retries\n\n @retries.setter\n def retries(self, retries: Retry | None) -> None:\n # Override the request_url if retries has a redirect location.\n if retries is not None and retries.history:\n self.url = retries.history[-1].redirect_location\n self._retries = retries\n\n def stream(\n self, amt: int | None = 2**16, decode_content: bool | None = None\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = _get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: bool | None, flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n if self._has_decoded_content:\n raise RuntimeError(\n \"Calling read(decode_content=False) is not supported after \"\n \"read(decode_content=True) was called.\"\n )\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n self._has_decoded_content = True\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. 
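A brief sketch of the ``json()`` helper defined earlier in this class, paired with the non-deprecated header access (assumes an endpoint that returns JSON):

.. code-block:: python

    import urllib3

    resp = urllib3.PoolManager().request("GET", "https://httpbin.org/json")

    # json() decodes resp.data as UTF-8 and parses it with the stdlib json module.
    payload = resp.json()

    # Preferred over the deprecated getheader()/getheaders() shims above:
    ctype = resp.headers.get("Content-Type", "")
    print(type(payload), ctype)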
Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> HTTPHeaderDict:\n warnings.warn(\n \"HTTPResponse.getheaders() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers\n\n def getheader(self, name: str, default: str | None = None) -> str | None:\n warnings.warn(\n \"HTTPResponse.getheader() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> str | None:\n return self.url" }, { "identifier": "is_connection_dropped", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/connection.py", "snippet": "def is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific\n \"\"\"\n Returns True if the connection is dropped and should be closed.\n :param conn: :class:`urllib3.connection.HTTPConnection` object.\n \"\"\"\n return not conn.is_connected" }, { "identifier": "connection_requires_http_tunnel", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n destination_scheme: str | None = None,\n) -> bool:\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. 
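To make the tunnelling rules concrete, a sketch only, since ``connection_requires_http_tunnel`` lives in an internal module whose location may change between releases:

.. code-block:: python

    from urllib3.util.proxy import connection_requires_http_tunnel
    from urllib3.util.url import parse_url

    proxy = parse_url("http://proxy.example:8080")

    # HTTP destinations are forwarded through the proxy as-is...
    assert connection_requires_http_tunnel(proxy, None, "http") is False
    # ...while HTTPS destinations default to an HTTP CONNECT tunnel.
    assert connection_requires_http_tunnel(proxy, None, "https") is True
    # No proxy configured means no tunnel, regardless of scheme.
    assert connection_requires_http_tunnel(None, None, "https") is False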
(i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "_TYPE_BODY_POSITION", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/request.py", "snippet": "_TYPE_BODY_POSITION = typing.Union[int, _TYPE_FAILEDTELL]" }, { "identifier": "set_file_position", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/request.py", "snippet": "def set_file_position(\n body: typing.Any, pos: _TYPE_BODY_POSITION | None\n) -> _TYPE_BODY_POSITION | None:\n \"\"\"\n If a position is provided, move file to that point.\n Otherwise, we'll attempt to record a position for future use.\n \"\"\"\n if pos is not None:\n rewind_body(body, pos)\n elif getattr(body, \"tell\", None) is not None:\n try:\n pos = body.tell()\n except OSError:\n # This differentiates from None, allowing us to catch\n # a failed `tell()` later when trying to rewind the body.\n pos = _FAILEDTELL\n\n return pos" }, { "identifier": "Retry", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/retry.py", "snippet": "class Retry:\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool:\n\n .. code-block:: python\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request(\"GET\", \"https://example.com/\")\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=Retry(10))\n\n Retries can be disabled by passing ``False``:\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. 
Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param Collection allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``None`` value to retry on any verb.\n\n :param Collection status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of previous retries}))\n\n seconds. If `backoff_jitter` is non-zero, this sleep is extended by::\n\n random.uniform(0, {backoff jitter})\n\n seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will\n sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever\n be longer than `backoff_max`.\n\n By default, backoff is disabled (factor set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. The list is in the order\n the requests occurred. 
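Working the backoff formula above through a concrete case; the ``pool=None`` in the error constructor is purely for this offline demonstration:

.. code-block:: python

    from urllib3.exceptions import ReadTimeoutError
    from urllib3.util.retry import Retry

    err = ReadTimeoutError(None, "/", "read timed out")  # pool=None: demo only
    r = Retry(total=5, backoff_factor=0.1)

    # get_backoff_time() is 0 until there is more than one consecutive
    # error, then grows as 0.1 * 2**(n - 1), capped at backoff_max (120s).
    for expected in (0.0, 0.2, 0.4, 0.8):
        r = r.increment(method="GET", url="/", error=err)
        assert r.get_backoff_time() == expected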
Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param Collection remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Default maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n # Backward compatibility; assigned outside of the class.\n DEFAULT: typing.ClassVar[Retry]\n\n def __init__(\n self,\n total: bool | int | None = 10,\n connect: int | None = None,\n read: int | None = None,\n redirect: bool | int | None = None,\n status: int | None = None,\n other: int | None = None,\n allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,\n status_forcelist: typing.Collection[int] | None = None,\n backoff_factor: float = 0,\n backoff_max: float = DEFAULT_BACKOFF_MAX,\n raise_on_redirect: bool = True,\n raise_on_status: bool = True,\n history: tuple[RequestHistory, ...] | None = None,\n respect_retry_after_header: bool = True,\n remove_headers_on_redirect: typing.Collection[\n str\n ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,\n backoff_jitter: float = 0.0,\n ) -> None:\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.backoff_max = backoff_max\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or ()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n h.lower() for h in remove_headers_on_redirect\n )\n self.backoff_jitter = backoff_jitter\n\n def new(self, **kw: typing.Any) -> Retry:\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n allowed_methods=self.allowed_methods,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n backoff_max=self.backoff_max,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n backoff_jitter=self.backoff_jitter,\n )\n\n params.update(kw)\n return type(self)(**params) # type: ignore[arg-type]\n\n @classmethod\n def from_int(\n cls,\n retries: Retry | bool | int | None,\n redirect: bool | int | None = True,\n default: Retry | bool | int | None = None,\n ) -> Retry:\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and 
None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self) -> float:\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n if self.backoff_jitter != 0.0:\n backoff_value += random.random() * self.backoff_jitter\n return float(max(0, min(self.backoff_max, backoff_value)))\n\n def parse_retry_after(self, retry_after: str) -> float:\n seconds: float\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(f\"Invalid Retry-After header: {retry_after}\")\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n seconds = max(seconds, 0)\n\n return seconds\n\n def get_retry_after(self, response: BaseHTTPResponse) -> float | None:\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response: BaseHTTPResponse) -> bool:\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self) -> None:\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response: BaseHTTPResponse | None = None) -> None:\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err: Exception) -> bool:\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err: Exception) -> bool:\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method: str) -> bool:\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n if self.allowed_methods and method.upper() not in self.allowed_methods:\n return False\n return True\n\n def is_retry(\n self, method: str, status_code: int, has_retry_after: bool = False\n ) -> bool:\n \"\"\"Is this method/status code retryable? 
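Two quick checks of the ``Retry-After`` parsing defined above (values chosen for illustration):

.. code-block:: python

    import email.utils
    from urllib3.util.retry import Retry

    r = Retry()
    # Integer form: plain seconds, optionally surrounded by whitespace.
    assert r.parse_retry_after(" 120 ") == 120
    # HTTP-date form: parsed, converted to seconds from now, clamped at 0.
    now = email.utils.formatdate(usegmt=True)
    assert r.parse_retry_after(now) >= 0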
(Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return bool(\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self) -> bool:\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = [\n x\n for x in (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n if x\n ]\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method: str | None = None,\n url: str | None = None,\n response: BaseHTTPResponse | None = None,\n error: Exception | None = None,\n _pool: ConnectionPool | None = None,\n _stacktrace: TracebackType | None = None,\n ) -> Retry:\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.BaseHTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n elif error and self._is_read_error(error):\n # Read retry?\n if read is False or method is None or not self._is_method_retryable(method):\n raise reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n response_redirect_location = response.get_redirect_location()\n if response_redirect_location:\n redirect_location = response_redirect_location\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n reason = error or ResponseError(cause)\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, 
new_retry)\n\n return new_retry\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(total={self.total}, connect={self.connect}, \"\n f\"read={self.read}, redirect={self.redirect}, status={self.status})\"\n )" }, { "identifier": "CertificateError", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "class CertificateError(ValueError):\n pass" }, { "identifier": "_DEFAULT_TIMEOUT", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/timeout.py", "snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token" }, { "identifier": "_TYPE_DEFAULT", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/timeout.py", "snippet": "class _TYPE_DEFAULT(Enum):\n # This value should never be passed to socket.settimeout() so for safety we use a -1.\n # socket.settimout() raises a ValueError for negative values.\n token = -1" }, { "identifier": "Timeout", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. 
Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. 
This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
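A sketch of how this accounting plays out in practice; the ``sleep`` stands in for a real connect attempt:

.. code-block:: python

    import time
    from urllib3.util.timeout import Timeout

    t = Timeout(total=5.0, connect=2.0).clone()  # fresh copy per request
    assert t.connect_timeout == 2.0              # min(connect, total)

    t.start_connect()                            # start the clock
    time.sleep(0.1)                              # pretend the handshake took 0.1s

    # With no explicit read timeout, read_timeout is whatever of `total`
    # remains after the connect phase.
    assert 0 < t.read_timeout <= 5.0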
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "Url", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" }, { "identifier": "_encode_target", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/url.py", "snippet": "def _encode_target(target: str) -> str:\n \"\"\"Percent-encodes a request target so that there are no invalid characters\n\n Pre-condition for this function is that 'target' must start with '/'.\n If that is the case then _TARGET_RE will always produce a match.\n \"\"\"\n match = _TARGET_RE.match(target)\n if not match: # Defensive:\n raise LocationParseError(f\"{target!r} is not a valid request URI\")\n\n path, query = match.groups()\n encoded_target = _encode_invalid_chars(path, _PATH_CHARS)\n if query is not None:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n encoded_target += \"?\" + query\n return encoded_target" }, { "identifier": "_normalize_host", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/url.py", "snippet": "@typing.overload\ndef _normalize_host(host: None, scheme: str | None) -> None:\n ..." }, { "identifier": "parse_url", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/url.py", "snippet": "def parse_url(url: str) -> Url:\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urllib.parse`.\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n print( urllib3.util.parse_url('http://google.com/mail/'))\n # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n\n print( urllib3.util.parse_url('google.com:80'))\n # Url(scheme=None, host='google.com', port=80, path=None, ...)\n\n print( urllib3.util.parse_url('/foo?bar'))\n # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not _SCHEME_RE.search(url):\n url = \"//\" + url\n\n scheme: str | None\n authority: str | None\n auth: str | None\n host: str | None\n port: str | None\n port_int: int | None\n path: str | None\n query: str | None\n fragment: str | None\n\n try:\n scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]\n normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, _USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port_int = int(port)\n if not (0 <= port_int <= 65535):\n raise LocationParseError(url)\n else:\n port_int = None\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, _PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)\n\n except (ValueError, AttributeError) as e:\n raise LocationParseError(source_url) from e\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n return Url(\n scheme=scheme,\n auth=auth,\n host=host,\n port=port_int,\n path=path,\n query=query,\n fragment=fragment,\n )" }, { "identifier": "to_str", "path": "backend/venv/lib/python3.10/site-packages/urllib3/util/util.py", "snippet": "def to_str(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> str:\n if isinstance(x, str):\n return x\n elif not isinstance(x, bytes):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.decode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.decode()" } ]
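The `read_timeout` property quoted above resolves to max(0, min(total - connect_duration, read)) once the connect clock has started; a minimal sketch of that behavior against the public `urllib3.util.Timeout` API (the sleep is a stand-in for real connect time):

import time
from urllib3.util import Timeout

# With both total and read set, read_timeout is
# max(0, min(total - connect_duration, read)) per the snippet above.
t = Timeout(connect=2.0, read=7.0, total=10.0)
t.start_connect()      # start the connect clock (see the TimeoutStateError note)
time.sleep(0.1)        # stand-in for time actually spent connecting
print(t.read_timeout)  # min(10.0 - ~0.1, 7.0) -> 7.0 here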
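Likewise, the `Url` and `parse_url` snippets document an RFC 3986 round trip; a short sketch using the same `urllib3.util.parse_url` entry point shown in their docstrings:

import urllib3

u = urllib3.util.parse_url("https://user:secret@example.com:8443/mail/?q=1#top")
assert u.scheme == "https" and u.host == "example.com" and u.port == 8443
assert u.request_uri == "/mail/?q=1"                    # path plus query
assert u.netloc == "example.com:8443"                   # host:port
assert u.authority == "user:secret@example.com:8443"    # userinfo@host:port
assert u.url == "https://user:secret@example.com:8443/mail/?q=1#top"  # round trip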
import errno import logging import queue import sys import typing import warnings import weakref import ssl from socket import timeout as SocketTimeout from types import TracebackType from ._base_connection import _TYPE_BODY from ._collections import HTTPHeaderDict from ._request_methods import RequestMethods from .connection import ( BaseSSLError, BrokenPipeError, DummyConnection, HTTPConnection, HTTPException, HTTPSConnection, ProxyConfig, _wrap_proxy_error, ) from .connection import port_by_scheme as port_by_scheme from .exceptions import ( ClosedPoolError, EmptyPoolError, FullPoolError, HostChangedError, InsecureRequestWarning, LocationValueError, MaxRetryError, NewConnectionError, ProtocolError, ProxyError, ReadTimeoutError, SSLError, TimeoutError, ) from .response import BaseHTTPResponse from .util.connection import is_connection_dropped from .util.proxy import connection_requires_http_tunnel from .util.request import _TYPE_BODY_POSITION, set_file_position from .util.retry import Retry from .util.ssl_match_hostname import CertificateError from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_DEFAULT, Timeout from .util.url import Url, _encode_target from .util.url import _normalize_host as normalize_host from .util.url import parse_url from .util.util import to_str from typing_extensions import Literal from ._base_connection import BaseHTTPConnection, BaseHTTPSConnection
21,573
# Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None,
from __future__ import annotations if typing.TYPE_CHECKING: log = logging.getLogger(__name__) _TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None] _SelfT = typing.TypeVar("_SelfT") # Pool objects class ConnectionPool: """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. .. note:: ConnectionPool.urlopen() does not normalize or percent-encode target URIs which is useful if your target server doesn't support percent-encoded target URIs. """ scheme: str | None = None QueueCls = queue.LifoQueue def __init__(self, host: str, port: int | None = None) -> None: if not host: raise LocationValueError("No host specified.") self.host = _normalize_host(host, scheme=self.scheme) self.port = port # This property uses 'normalize_host()' (not '_normalize_host()') # to avoid removing square braces around IPv6 addresses. # This value is sent to `HTTPConnection.set_tunnel()` if called # because square braces are required for HTTP CONNECT tunneling. self._tunnel_host = normalize_host(host, scheme=self.scheme).lower() def __str__(self) -> str: return f"{type(self).__name__}(host={self.host!r}, port={self.port!r})" def __enter__(self: _SelfT) -> _SelfT: return self def __exit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> Literal[False]: self.close() # Return False to re-raise any potential exceptions return False def close(self) -> None: """ Close all pooled connections and disable the pool. """ # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252 _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK} class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`http.client.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`http.client.HTTPConnection`. :param timeout: Socket timeout in seconds for each individual connection. This can be a float or integer, which sets the timeout for the HTTP request, or an instance of :class:`urllib3.util.Timeout` which gives you more fine-grained control over request timeouts. After the constructor has been parsed, this is always a `urllib3.util.Timeout` object. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. If ``block`` is set to False, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param retries: Retry configuration to use by default with requests in this pool. :param _proxy: Parsed proxy URL, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param _proxy_headers: A dictionary with proxy headers, should not be used directly, instead, see :class:`urllib3.ProxyManager` :param \\**conn_kw: Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`, :class:`urllib3.connection.HTTPSConnection` instances. 
""" scheme = "http" ConnectionCls: ( type[BaseHTTPConnection] | type[BaseHTTPSConnection] ) = HTTPConnection def __init__( self, host: str, port: int | None = None, timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT, maxsize: int = 1, block: bool = False, headers: typing.Mapping[str, str] | None = None, retries: Retry | bool | int | None = None,
_proxy: Url | None = None,
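The `HTTPConnectionPool` docstring in the code above spells out `maxsize`, `block`, `timeout`, and `retries`; a minimal usage sketch against urllib3's public API, assuming a reachable HTTP server on localhost:8080 (hypothetical):

import urllib3

# maxsize connections are kept for reuse; block=True caps concurrent use
# at maxsize instead of opening extra throwaway connections.
pool = urllib3.HTTPConnectionPool(
    "localhost",
    port=8080,  # hypothetical local server
    maxsize=4,
    block=True,
    timeout=urllib3.util.Timeout(connect=2.0, read=5.0),
    retries=urllib3.util.Retry(total=3),
)
resp = pool.request("GET", "/health")  # .request() comes from the RequestMethods mixin
print(resp.status, len(resp.data))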
28
2023-10-23 18:09:28+00:00
24k
zju3dv/nr_in_a_room
test/test_optim_pano.py
[ { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n chunk: int,\n model_ckpt_path_dict: Dict[str, Any],\n config=None,\n scale_factor_dict: Dict[str, Any] = {},\n scene_info_path: str = None,\n scene_info_json_path: str = None,\n model_type=\"NeuS\",\n N_samples: int = 64,\n N_importance: int = 128,\n relation_info: Dict[str, Any] = {},\n output_path: str = None,\n prefix: str = \"\",\n active_instance_id: list = [46, 4, 9, 102],\n virtual_instance_id: list = [], # specific for edit (insert virtual to real) mode\n filter_door_and_window: bool = True,\n lr: float = 1e-2,\n N_optim_step: int = 500,\n adjust_lr_per_step: int = 150,\n optim_batch_size: int = 1024,\n use_amp: bool = False,\n extract_obj_bbox_from_neural_model: bool = False,\n ig_data_base_dir: str = \"data/ig_dataset_v1.0.1/\",\n mask_per_object: bool = False,\n bbox_ray_intersect: bool = True,\n bbox_enlarge: float = 0.1,\n optimize_light_env: bool = True,\n optimize_appearance_code: bool = False,\n use_light_from_image_attr: bool = False,\n use_appearance_from_image_attr: bool = False,\n optimize_option: list = [\n \"photometric_loss\",\n \"perceptual_loss\",\n \"z_axis_align_loss\",\n \"object_room_wall_attach\",\n \"object_room_floor_attach\",\n \"physical_violation\",\n \"object_object_attach\",\n ],\n ):\n # load config\n self.scene_info_path = scene_info_path\n self.scale_factor = scale_factor\n self.scale_factor_dict = scale_factor_dict\n self.bg_scale_factor = bg_scale_factor\n self.bg_scene_center = np.array(bg_scene_center)\n self.ig_data_base_dir = ig_data_base_dir\n self.mask_per_object = mask_per_object\n self.bbox_ray_intersect = bbox_ray_intersect\n self.bbox_enlarge = bbox_enlarge\n self.virtual_instance_id = virtual_instance_id\n\n self.img_wh = img_wh\n self.w = img_wh[0]\n self.h = img_wh[1]\n self.near = near\n self.far = far\n self.N_importance = N_importance\n self.N_samples = N_samples\n self.chunk = chunk\n self.lr = lr\n self.N_optim_step = N_optim_step\n self.adjust_lr_per_step = adjust_lr_per_step\n self.optim_batch_size = optim_batch_size\n self.use_amp = use_amp\n self.optimize_light_env = optimize_light_env\n self.optimize_appearance_code = optimize_appearance_code\n self.optimize_option = optimize_option\n self.config = config\n\n self.use_light_from_image_attr = use_light_from_image_attr\n if self.use_light_from_image_attr:\n print(\n \"WARNING: self.use_light_from_image_attr = True, using hard coded light env.\"\n )\n self.hard_coded_light_id = 0 # just for compatibility\n # self.hard_coded_light_id = 9 # probe_03 in 10 HDR multi_light training\n\n self.use_appearance_from_image_attr = use_appearance_from_image_attr\n if self.use_appearance_from_image_attr:\n print(\n \"WARNING: self.use_appearance_from_image_attr = True, using first frame appearance code.\"\n )\n self.hard_coded_appearance_frame_id = 0\n\n self.optimize_exposure = \"optimize_exposure\" in self.optimize_option\n\n # laod scene info\n if scene_info_json_path is None:\n scene_info_json_path = os.path.join(scene_info_path, \"data.json\")\n self.scene_meta = read_json(scene_info_json_path)\n\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n self.relation_info = relation_info\n\n self.model_type = model_type\n # self.load_model(\n # model_type, model_ckpt_path_dict[\"obj\"], 
model_ckpt_path_dict[\"bg\"]\n # )\n self.load_model_from_dict_path(model_type, model_ckpt_path_dict)\n\n self.reset_optimizable_parameters()\n\n if extract_obj_bbox_from_neural_model:\n self.extract_bounding_boxes_from_neural_model()\n\n if self.bbox_ray_intersect:\n self.prepare_bbox_ray_helper()\n\n self.set_output_path(output_path, prefix)\n\n print(\"RoomOptimizer initialize finished.\")\n\n def load_model_from_dict_path(self, model_type, model_ckpt_path_dict):\n assert model_type == \"NeuS\"\n self.models = {}\n self.image_attrs = {}\n\n # avoid duplicate loading\n self.models_cache = {}\n self.image_attrs_cache = {}\n\n print(\"loading model with instance_id\", self.active_instance_id)\n\n # print(model_ckpt_path_dict)\n for obj_id in self.active_instance_id:\n # identify ckpt_path\n if str(obj_id) in model_ckpt_path_dict:\n ckpt_info = model_ckpt_path_dict[str(obj_id)]\n elif obj_id == 0:\n assert (\n \"bg\" in model_ckpt_path_dict or \"0\" in model_ckpt_path_dict\n ), \"model_ckpt_path_dict missing background 'bg' or '0' ckpt\"\n ckpt_info = model_ckpt_path_dict.get(\"bg\", model_ckpt_path_dict[\"0\"])\n else:\n print(\n f\"Cannot find specific model for obj_id = {obj_id}, \\\n maybe config file is not compatible with given active_instance_id.\"\n )\n ckpt_info = model_ckpt_path_dict[\"obj\"]\n # load with cache\n ckpt_path, neus_conf = ckpt_info[\"path\"], ckpt_info[\"neus_conf\"]\n if ckpt_info not in self.models_cache:\n (\n self.models_cache[ckpt_path],\n self.image_attrs_cache[ckpt_path],\n ) = self.load_model_neus(ckpt_path, obj_id, neus_conf)\n self.models[f\"neus_{obj_id}\"] = self.models_cache[ckpt_path]\n self.image_attrs[str(obj_id)] = self.image_attrs_cache[ckpt_path]\n\n def load_model_nerf(self, ckpt_path):\n # TODO(ybbbbt): fix hard coding\n conf = {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n }\n nerf_coarse = NeRF_Object(conf)\n nerf_fine = NeRF_Object(conf)\n image_attributes = ImageAttributes(conf)\n load_ckpt(nerf_coarse, ckpt_path, model_name=\"nerf_coarse\")\n load_ckpt(nerf_fine, ckpt_path, model_name=\"nerf_fine\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n nerf_coarse = nerf_coarse.cuda().eval()\n nerf_fine = nerf_fine.cuda().eval()\n image_attributes = image_attributes.cuda().eval()\n\n models = {\n \"coarse\": nerf_coarse,\n \"fine\": nerf_fine,\n }\n\n embedding_xyz = Embedding(3, 10)\n embedding_dir = Embedding(3, 4)\n embeddings = {\n \"xyz\": embedding_xyz,\n \"dir\": embedding_dir,\n }\n return models, embeddings, image_attributes\n\n def load_model_neus(self, ckpt_path, obj_id, config_path=\"config/neus.yaml\"):\n conf = {\n \"model\": {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n },\n }\n if self.optimize_light_env:\n # conf[\"model\"].update({\"N_max_lights\": 128, \"N_light_embedding\": 16})\n conf[\"model\"].update({\"N_max_lights\": 1024, \"N_light_embedding\": 16})\n\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n conf[\"model\"].update(\n {\"N_max_appearance_frames\": 10000, \"N_appearance_embedding\": 16}\n )\n\n neus, render_kwargs_train, render_kwargs_test = get_model_neus(\n config_path=config_path, need_trainer=False, extra_conf=conf\n )\n self.render_kwargs_neus = render_kwargs_test\n image_attributes = ImageAttributes(conf[\"model\"])\n\n print(ckpt_path)\n load_ckpt(neus, ckpt_path, model_name=\"neus\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n if self.config is not None and (\n str(obj_id) in 
self.config.get(\"map_virtual_to_local\", {})\n ):\n # image_attributes.embedding_instance\n real_id_in_ckpt = self.config.map_virtual_to_local[str(obj_id)]\n image_attributes.embedding_instance.weight.requires_grad = False\n image_attributes.embedding_instance.weight[\n obj_id\n ] = image_attributes.embedding_instance.weight[real_id_in_ckpt]\n # ipdb.set_trace()\n\n neus.cuda().eval()\n image_attributes.cuda().eval()\n return neus, image_attributes\n\n def reset_optimizable_parameters(self):\n self.params = []\n self.relation_info = {}\n if self.optimize_light_env:\n self.initialize_light_code()\n\n if self.optimize_appearance_code:\n self.initialize_appearance_code()\n\n if self.optimize_exposure:\n self.initialize_autoexposure()\n\n def save_optimizable_parameters(self, path):\n all_param_dict = {}\n # all_param_dict[\"params\"] = self.params\n all_param_dict[\"relation_info\"] = self.relation_info\n all_param_dict[\"object_pose_dict\"] = copy.deepcopy(self.object_pose_dict)\n all_param_dict[\"active_instance_id\"] = copy.deepcopy(self.active_instance_id)\n if self.optimize_light_env:\n all_param_dict[\"light_code\"] = copy.deepcopy(self.light_code_dict)\n if self.optimize_appearance_code:\n all_param_dict[\"appearance_code\"] = copy.deepcopy(self.appearance_code_dict)\n if self.optimize_exposure:\n all_param_dict[\"exposure\"] = copy.deepcopy(self.autoexposure_param)\n torch.save(all_param_dict, path)\n\n def load_optimizable_parameters(self, path):\n all_param_dict = torch.load(path)\n # self.params = all_param_dict[\"params\"]\n self.relation_info = all_param_dict[\"relation_info\"]\n if len(self.virtual_instance_id) == 0: # not overwrite in edit mode\n self.active_instance_id = all_param_dict[\"active_instance_id\"]\n\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if len(self.virtual_instance_id) == 0: # not modify edit mode pose\n if hasattr(self, \"object_pose_dict\"):\n self.object_pose_dict.update(all_param_dict[\"object_pose_dict\"])\n else:\n self.object_pose_dict = all_param_dict[\"object_pose_dict\"]\n if self.optimize_light_env:\n self.light_code_dict = all_param_dict[\"light_code\"]\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n self.appearance_code_dict = all_param_dict[\"appearance_code\"]\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure and \"exposure\" in all_param_dict:\n self.autoexposure_param = all_param_dict[\"exposure\"]\n to_gpu(self.autoexposure_param)\n # ipdb.set_trace()\n\n def interpolate_light_env_from_states(self, path1, path2, interp):\n all_param_dict_1 = torch.load(path1)\n all_param_dict_2 = torch.load(path2)\n\n # self.params = all_param_dict[\"params\"]\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if self.optimize_light_env:\n light_code_dict_1 = all_param_dict_1[\"light_code\"]\n light_code_dict_2 = all_param_dict_2[\"light_code\"]\n for k, v in self.light_code_dict.items():\n self.light_code_dict[k] = light_code_dict_1[\n k\n ] * interp + light_code_dict_2[k] * (1 - interp)\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n appearance_code_dict_1 = all_param_dict_1[\"appearance_code\"]\n 
appearance_code_dict_2 = all_param_dict_2[\"appearance_code\"]\n for k, v in self.appearance_code_dict.items():\n self.appearance_code_dict[k] = appearance_code_dict_1[\n k\n ] * interp + appearance_code_dict_2[k] * (1 - interp)\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure:\n autoexposure_param_1 = all_param_dict_1[\"exposure\"]\n autoexposure_param_2 = all_param_dict_2[\"exposure\"]\n for k, v in self.autoexposure_param.items():\n self.autoexposure_param[k] = autoexposure_param_1[\n k\n ] * interp + autoexposure_param_2[k] * (1 - interp)\n to_gpu(self.autoexposure_param)\n\n def reset_active_instance_id(self, active_instance_id, filter_door_and_window=True):\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n def set_output_path(self, output_path: str, prefix: str, with_timestamp=True):\n if output_path is not None:\n if with_timestamp:\n self.output_path = os.path.join(\n output_path, f\"rendered_{get_timestamp()}_{prefix}\"\n )\n else:\n self.output_path = os.path.join(output_path, f\"{prefix}\")\n os.makedirs(self.output_path, exist_ok=True)\n\n def filter_door_and_window(self):\n print(\"Filtering door and window objects.\")\n filtered_active_instance_id = []\n for obj_id in self.active_instance_id:\n if self.get_type_of_instance(obj_id) not in [\"door\", \"window\"]:\n filtered_active_instance_id += [obj_id]\n self.active_instance_id = filtered_active_instance_id\n\n def initialize_light_code(self):\n self.light_code_dict = {}\n for obj_id in self.active_instance_id:\n # light_code = torch.randn((16)).cuda()\n light_code = torch.zeros((16)).cuda()\n light_code.requires_grad = True\n self.params += [\n {\"params\": light_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.light_code_dict[str(obj_id)] = light_code\n\n def initialize_appearance_code(self):\n self.appearance_code_dict = {}\n for obj_id in self.active_instance_id:\n # appearance_code = torch.randn((16)).cuda()\n appearance_code = torch.zeros((16)).cuda()\n appearance_code.requires_grad = True\n self.params += [\n {\"params\": appearance_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.appearance_code_dict[str(obj_id)] = appearance_code\n\n def initialize_autoexposure(self):\n self.autoexposure_param = {}\n for obj_id in self.active_instance_id:\n # scale and shift\n autoexposure_param = torch.Tensor([1, 1, 1, 0, 0, 0]).cuda()\n autoexposure_param.requires_grad = True\n self.params += [\n {\"params\": autoexposure_param, \"lr\": self.lr * 0.1}\n ] # light code can be optimized with larger lr\n self.autoexposure_param[str(obj_id)] = autoexposure_param\n\n def get_scale_factor(self, obj_id):\n if obj_id == 0:\n return self.bg_scale_factor\n elif str(obj_id) in self.scale_factor_dict:\n return self.scale_factor_dict[str(obj_id)]\n else:\n return self.scale_factor\n\n def extract_bounding_boxes_from_neural_model(self):\n print(\"Extracting object bounding boxes from neural model...\")\n assert self.model_type == \"NeuS\"\n for obj_id in tqdm(self.active_instance_id):\n mesh = extract_mesh_from_neus(\n self.models[f\"neus_{obj_id}\"],\n self.image_attrs[str(obj_id)],\n obj_id,\n )\n bbox = mesh.get_axis_aligned_bounding_box()\n bound = np.array([bbox.min_bound, bbox.max_bound])\n size = (bound[1] - bound[0]) * self.get_scale_factor(obj_id)\n # update scene_meta\n for idx, obj_info in enumerate(self.scene_meta[\"objs\"]):\n if obj_info[\"id\"] == obj_id:\n 
self.scene_meta[\"objs\"][idx][\"bdb3d\"][\"size\"] = size.tolist()\n\n def prepare_bbox_ray_helper(self):\n # bbox ray helper dict\n self.bbox_ray_helper_dict = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n length = np.array(obj_meta_info[\"bbox3d\"][\"size\"])\n self.bbox_ray_helper_dict[str(obj_id)] = BBoxRayHelper(np.zeros(3), length)\n\n def generate_object_rays(\n self, rays_o_obj, rays_d_obj, obj_id, near=None, far=None, select_ind=None\n ):\n \"\"\"\n Generate object rays given rays_o, rays_d and obj_id\n Input:\n select_ind: only for masked rendering\n \"\"\"\n if obj_id == 0: # background\n return self.generate_bg_rays(rays_o_obj, rays_d_obj, near=near, far=far)\n if self.bbox_ray_intersect:\n # for object, rays_o and rays_d should lie in world scale (unscaled)\n bbox_mask, bbox_batch_near, bbox_batch_far = self.bbox_ray_helper_dict[\n str(obj_id)\n ].get_ray_bbox_intersections(\n rays_o_obj,\n rays_d_obj,\n self.get_scale_factor(obj_id),\n # bbox_enlarge=self.bbox_enlarge / self.get_scale_factor(obj_id),\n bbox_enlarge=self.bbox_enlarge, # in physical world\n )\n # for area which hits bbox, we use bbox hit near far\n # bbox_ray_helper has scale for us, do no need to rescale\n batch_near_obj, batch_far_obj = bbox_batch_near, bbox_batch_far\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n # for the invalid part, we use 0 as near far, which assume that (0, 0, 0) is empty\n batch_near_obj[~bbox_mask] = torch.zeros_like(batch_near_obj[~bbox_mask])\n batch_far_obj[~bbox_mask] = torch.zeros_like(batch_far_obj[~bbox_mask])\n else:\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_obj = (\n near\n / self.get_scale_factor(obj_id)\n * torch.ones_like(rays_o_obj[:, :1])\n )\n batch_far_obj = (\n far / self.get_scale_factor(obj_id) * torch.ones_like(rays_d_obj[:, :1])\n )\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n\n if self.mask_per_object:\n # mask out of bound rendering\n obj_mask = torch.from_numpy(self.instance_mask == obj_id).view(-1)\n obj_mask = obj_mask[select_ind]\n batch_near_obj[~obj_mask] = 0\n batch_far_obj[~obj_mask] = 0\n\n rays_obj = torch.cat(\n [rays_o_obj, rays_d_obj, batch_near_obj, batch_far_obj], 1\n ) # (H*W, 8)\n rays_obj = rays_obj.cuda()\n return rays_obj\n\n def generate_bg_rays(self, rays_o_bg, rays_d_bg, near=None, far=None):\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_bg = near / self.bg_scale_factor * torch.ones_like(rays_o_bg[:, :1])\n batch_far_bg = far / self.bg_scale_factor * torch.ones_like(rays_d_bg[:, :1])\n rays_o_bg = rays_o_bg / self.bg_scale_factor\n rays_bg = torch.cat(\n [rays_o_bg, rays_d_bg, batch_near_bg, batch_far_bg], 1\n ) # (H*W, 8)\n rays_bg = rays_bg.cuda()\n return rays_bg\n\n def batched_inference_multi(\n self,\n rays_list,\n obj_id_list,\n to_cpu=True,\n hit_test_only=False,\n need_normal=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=True,\n refine_edge=False,\n refine_edge_obj_ids=[],\n render_mask=False,\n # use_sphere_tracing=False,\n show_progress=False,\n **kwargs,\n ):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays_list[0].shape[0]\n results = defaultdict(list)\n for i in tqdm(range(0, B, self.chunk), disable=not show_progress):\n extra_chunk = dict()\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor) and \"autoexposure_\" not in 
k:\n extra_chunk[k] = v[i : i + self.chunk]\n else:\n extra_chunk[k] = v\n if self.model_type == \"NeRF\":\n rendered_ray_chunks = render_rays_multi(\n self.models,\n self.embeddings,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n self.N_samples,\n use_disp=False,\n perturb=0.001,\n # perturb=0.00,\n noise_std=0,\n N_importance=self.N_importance,\n chunk=self.chunk,\n white_back=True,\n individual_weight_for_coarse=True,\n obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n **extra_chunk,\n )\n elif self.model_type == \"NeuS\":\n rendered_ray_chunks = render_rays_multi_neus(\n self,\n self.models,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n noise_std=0,\n white_back=True,\n # white_back=False,\n # obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n hit_test_only=hit_test_only,\n need_normal=need_normal,\n use_sphere_tracing=use_sphere_tracing,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n refine_edge_obj_ids=refine_edge_obj_ids,\n render_mask=render_mask,\n extra_dict=extra_chunk,\n render_kwargs=self.render_kwargs_neus,\n )\n\n for k, v in rendered_ray_chunks.items():\n if to_cpu:\n results[k] += [v.cpu()]\n else:\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def render_full_scene(\n self,\n pose: np.ndarray,\n idx: int,\n h: int,\n w: int,\n write_idx_on_image=True,\n return_raw_image=False,\n render_mask=False,\n refine_edge=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=False,\n show_progress=False,\n refine_edge_obj_ids=[],\n fovx_deg=0,\n ):\n extra_dict = dict()\n extra_dict[\"compute_3d_mask\"] = False\n extra_dict[\"is_eval\"] = True\n\n rays_list = []\n object_id_list = []\n\n if fovx_deg > 0:\n focal = (w / 2) / np.tan((fovx_deg / 2) / (180 / np.pi))\n print(\"focal =\", focal)\n directions = get_ray_directions(h, w, focal).cuda() # (h, w, 3)\n else:\n directions = get_ray_directions_equirectangular(h, w).cuda() # (h, w, 3)\n\n for obj_id in self.active_instance_id:\n # get object location\n # Two: object to world pose\n if obj_id == 0: # 0 denotes background\n Two = np.eye(4)\n Two[:3, 3] = self.bg_scene_center\n else: # other objects\n Two = torch.eye(4).cuda()\n Two[:3, :3] = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n Two[:3, 3] = self.object_pose_dict[str(obj_id)][\"trans\"]\n Two = Two.detach().cpu().numpy()\n # pose: Twc\n # we need: Toc\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n\n Toc = np.linalg.inv(Two) @ Twc\n\n Toc = torch.from_numpy(Toc).float().cuda()[:3, :4]\n rays_o, rays_d = get_rays(directions, Toc)\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id)\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr or obj_id in self.virtual_instance_id:\n if not hasattr(self, \"hard_code_light_id\"):\n self.hard_coded_light_id = 0\n extra_dict[\"embedding_light_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # 
appearance code\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n\n # optimize exposure\n if self.optimize_exposure and obj_id not in self.virtual_instance_id:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n with torch.cuda.amp.autocast(enabled=True):\n with torch.no_grad():\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n use_sphere_tracing=use_sphere_tracing,\n # use_sphere_tracing=True,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n render_mask=render_mask,\n show_progress=show_progress,\n **extra_dict,\n )\n img = results[f\"rgb_fine\"]\n img_pred = np.clip(img.view(h, w, 3).cpu().numpy(), 0, 1)\n img_pred_ = (img_pred * 255).astype(np.uint8)\n\n if return_raw_image:\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0]\n .cpu()\n .numpy()\n .round()\n .astype(np.uint16)\n )\n return img_pred_, img_mask\n return img_pred_ # raw image in [h, w, 3] np.uint8\n\n if write_idx_on_image:\n img_pred_ = cv2.putText(\n img_pred_,\n \"Iter: {:03d}\".format(idx),\n (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n (255, 0, 0),\n 2,\n )\n\n imageio.imwrite(\n os.path.join(self.output_path, f\"{idx:06d}.multi_obj.png\"), img_pred_\n )\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0].cpu().numpy().round().astype(np.uint16)\n )\n cv2.imwrite(os.path.join(self.output_path, f\"{idx:06d}.seg.png\"), img_mask)\n\n def set_initial_object_poses_from_scene_meta(self, add_noise=True):\n self.object_pose_dict = {}\n\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n if \"gt_T_wo\" in obj_meta_info:\n Two = obj_meta_info[\"gt_T_wo\"]\n else:\n print(\n f\"Cannot find object pose for obj_id = {obj_id}, use custom pose with minor offset.\"\n )\n Two = np.eye(4)\n from scipy.spatial.transform import Rotation as R\n\n rot_fix = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)\n # TODO: update initial pose for real-world scenes\n # if obj_id == 31:\n # blender_xyz = np.array([-1.44, 1.18, 0.1])\n # blender_rot = R.from_quat([0.5, -0.5, 0.5, 0.5]).as_matrix()\n # elif obj_id == 32:\n # blender_xyz = np.array([0.76, 0.54, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n # elif obj_id == 33:\n # blender_xyz = np.array([-0.06, 1.01, -0.9])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 34:\n # blender_xyz = np.array([-0.05, 1.14, -0.15])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 35:\n # blender_xyz = np.array([-0.35, 1.1, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n\n # Two[:3, :3] = blender_rot @ rot_fix\n # Two[:3, :3] = rot_fix @ blender_rot\n # Two[:3, 3] = rot_fix @ blender_xyz\n\n # Two[1, 3] += 0.75\n # Two[2, 3] -= 0.7\n\n # add noise\n if add_noise:\n Two[:3, 3] += 0.1\n from 
scipy.spatial.transform import Rotation as R\n\n rot_noise = R.from_euler(\"z\", 20, degrees=True).as_matrix()\n Two[:3, :3] = Two[:3, :3] @ rot_noise\n Two = torch.from_numpy(Two).float().cuda()\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n if \"fix_object_pose\" not in self.optimize_option:\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_from_prediction(self, pred_json_path):\n print(\"Initial pose from\", pred_json_path)\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n pred_info = read_json(pred_json_path)\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.array(pred_info[str(obj_id)][\"Two\"])\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n\n if not \"fix_object_pose\" in self.optimize_option:\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_as_identity(self):\n print(\"Initial pose as identity.\")\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.eye(4)\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_sampling_mask_from_seg(\n self,\n seg_mask=None,\n seg_mask_path=None,\n add_noise_to_seg=0,\n convert_seg_mask_to_box_mask=False,\n ):\n if seg_mask_path is not None:\n print(\"Read segmentation from gt mask\")\n # read mask\n self.instance_mask = get_instance_mask(seg_mask_path, img_wh=self.img_wh)\n elif seg_mask is not None:\n self.instance_mask = seg_mask\n else:\n print(\"Warning: empty mask\")\n self.merged_mask = (\n np.ones((self.img_wh[1], self.img_wh[0])).reshape(-1).astype(bool)\n )\n return\n\n # merge active object masks\n merged_mask = np.zeros_like(self.instance_mask)\n for i_obj, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue # do not accumulate background obj_id\n instance_mask_obj = self.instance_mask == obj_id\n # use tightly fit bbox instead of segmentation mask\n if convert_seg_mask_to_box_mask:\n instance_mask_obj = seg_mask_to_box_mask(instance_mask_obj)\n merged_mask = np.logical_or(merged_mask, instance_mask_obj)\n\n # if add noise to gt segmentation\n if add_noise_to_seg != 0:\n is_dilate = add_noise_to_seg > 0\n add_noise_to_seg = abs(add_noise_to_seg)\n kernel = np.ones((add_noise_to_seg, add_noise_to_seg), np.uint8)\n if is_dilate:\n merged_mask = cv2.dilate(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n else:\n merged_mask = cv2.erode(\n merged_mask.astype(np.uint8), kernel, iterations=1\n 
).astype(bool)\n cv2.imwrite(\n f\"{self.output_path}/merged_mask.png\", merged_mask.astype(np.uint8) * 255\n )\n self.merged_mask = merged_mask.reshape(-1)\n\n def get_type_of_instance(self, instance_id):\n for obj_info in self.scene_meta[\"objs\"]:\n if obj_info[\"id\"] == instance_id:\n return obj_info[\"classname\"]\n return \"unknown\"\n\n def generate_relation(\n self,\n obj_to_room_distance_th: float = 0.5,\n top_down_dist_th: float = 0.3,\n top_down_xy_close_factor: float = 0.8,\n ):\n \"\"\"\n Generate relationship : object-wall, object-floor, object-object\n \"\"\"\n print(\"Start to generate relation from initial poses and neural models...\")\n all_obj_info = {}\n for i, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue\n Rwo = rotation_6d_to_matrix(self.object_pose_dict[str(obj_id)][\"rot6d\"])\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n all_obj_info[str(obj_id)] = optimized_meta\n with torch.no_grad():\n generate_relation_for_all(\n room_optimizer=self,\n all_obj_info=all_obj_info,\n obj_to_room_distance_th=obj_to_room_distance_th,\n top_down_dist_th=top_down_dist_th,\n top_down_xy_close_factor=top_down_xy_close_factor,\n )\n # print(\"Relation:\\n\", self.relation_info)\n for k, v in self.relation_info.items():\n print(k, v)\n\n def optimize(self, input_rgb: torch.Tensor, pose=None):\n \"\"\"\n Inputs:\n input_rgb: torch.Tensor [h, w, 3] normalized in 0...1\n \"\"\"\n if pose is None:\n pose = np.array(self.scene_meta[\"camera\"][\"cam3d2world\"]).reshape(4, 4)\n # Original poses has rotation in form \"right down forward\", change to NDC \"right up back\"\n fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n pose[:3, :3] = pose[:3, :3] @ fix_rot\n\n # camera to world pose\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n Twc = torch.from_numpy(Twc).float().cuda()\n\n if \"keypoint_mask\" in self.optimize_option:\n # detect keypoint for interest region\n keypoint_mask = detect_keypoints(input_rgb.numpy(), circle_radius=5)\n self.merged_mask = np.logical_and(\n keypoint_mask, self.merged_mask.reshape(keypoint_mask.shape)\n )\n cv2.imwrite(\n f\"{self.output_path}/merged_mask_keypoint.png\",\n self.merged_mask.astype(np.uint8) * 255,\n )\n self.merged_mask = self.merged_mask.reshape(-1)\n\n input_rgb = input_rgb.view(-1, 3) # (H*W, 3) RGB\n\n directions = get_ray_directions_equirectangular(\n self.h, self.w\n ).cuda() # (h, w, 3)\n\n mse_loss = nn.MSELoss(reduction=\"none\")\n\n assert hasattr(\n self, \"params\"\n ), \"Please set initial pose params before optimization.\"\n optimizer = torch.optim.Adam(self.params)\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)\n perceptual_net = perceptual_model.VGG16_for_Perceptual().cuda()\n\n sample_prob = pano_sample_probability(self.h, self.w).reshape(-1)\n\n t = trange(self.N_optim_step, desc=\"Opt.\", leave=True)\n for i_step in t:\n if \"regenerate_relation_during_test\" in self.optimize_option:\n if i_step != 0 and i_step % 50 == 0:\n self.generate_relation()\n if self.adjust_lr_per_step > 0:\n adjust_learning_rate(\n self.lr,\n optimizer,\n i_step,\n base=0.5,\n adjust_lr_every=self.adjust_lr_per_step,\n )\n extra_dict = dict()\n rays_list = []\n object_id_list = []\n # sample according to batch size 
limitation\n select_ind = np.arange(self.merged_mask.shape[0])[self.merged_mask]\n if (\n \"perceptual_loss\" not in self.optimize_option\n ): # we only sample some points in this case\n # sample according to pano distribution\n select_sample_prob = sample_prob[self.merged_mask]\n select_sample_prob /= select_sample_prob.sum()\n # assert select_ind.shape[0] > self.optim_batch_size\n sample_size = min(select_ind.shape[0], self.optim_batch_size)\n select_ind = np.random.choice(\n select_ind,\n size=sample_size,\n replace=False,\n p=select_sample_prob,\n )\n\n # add some sampling on the background for bg light code\n if self.optimize_light_env:\n bg_sample_ratio = 0.2\n bg_sample_prob = sample_prob[~self.merged_mask]\n bg_sample_prob /= bg_sample_prob.sum()\n bg_sample_ind = np.arange(self.merged_mask.shape[0])[~self.merged_mask]\n # assert bg_sample_ind.shape[0] > self.optim_batch_size\n bg_sample_size = min(\n bg_sample_ind.shape[0], int(bg_sample_ratio * self.optim_batch_size)\n )\n if bg_sample_size > 0:\n bg_sample_ind = np.random.choice(\n bg_sample_ind,\n size=bg_sample_size,\n replace=False,\n p=bg_sample_prob,\n )\n select_ind = np.concatenate([select_ind, bg_sample_ind], axis=-1)\n\n select_ind = np.unique(select_ind)\n if i_step == 0:\n print(\"Actual optimization rays\", select_ind.shape[0])\n select_input_rgb = input_rgb[select_ind].float().cuda()\n\n loss_dict = {}\n all_obj_info = {} # prepare for violation loss\n\n for i, obj_id in enumerate(self.active_instance_id):\n # object to world pose\n if obj_id == 0:\n Rwo = torch.eye(3).cuda()\n two = torch.from_numpy(self.bg_scene_center).float().cuda()\n else:\n Rwo = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n\n # camera to object pose\n Toc = torch.eye(4).cuda()\n Toc[:3, :3] = Rwo.T @ Twc[:3, :3]\n Toc[:3, 3] = Rwo.T @ (Twc[:3, 3] - two)\n\n # generate object rays\n rays_o, rays_d = get_rays(directions, Toc[:3, :4])\n\n rays_o = rays_o[select_ind]\n rays_d = rays_d[select_ind]\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id\n )\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr:\n extra_dict[\n \"embedding_light_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # autoexposure\n if self.optimize_exposure:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n # we do not need to add relation constraints to bg\n if obj_id == 0:\n continue\n\n # enforce optimising on yaw\n if \"z_axis_align_loss\" in self.optimize_option:\n 
loss_dict[\"z_axis_loss_{}\".format(obj_id)] = (\n z_axis_loss(Rwo, 1.0) * 1e2\n )\n\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n obj_id_key = str(obj_id)\n\n if obj_id_key not in self.relation_info:\n continue\n\n # get obj_relation from input\n obj_relation = self.relation_info[obj_id_key]\n # supplement obj_type\n obj_type = self.get_type_of_instance(obj_id)\n optimized_meta[\"obj_type\"] = obj_type\n\n all_obj_info[str(obj_id)] = optimized_meta\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n \"\"\"attach wall loss\"\"\"\n if (\n \"object_room_wall_attach\" in self.optimize_option\n and obj_relation.get(\"attach_wall\", False)\n ):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n # \"face_direction\": torch.Tensor([0, 1, 0]),\n # \"face_direction\": obj_relation.get(\n # \"attach_wall_face_dir\", torch.Tensor([0, 1, 0])\n # ),\n \"face_direction\": obj_relation[\"attach_wall_face_dir\"],\n \"ray_grid_size\": 10,\n }\n # for door object, we slightly stretch the size to ensure successive hit-test\n if obj_type == \"door\" or obj_type == \"window\":\n kwargs.update(\n {\n \"ray_grid_stretch\": torch.Tensor([1.2, 1.2, 1]),\n \"use_bbox_surface_as_in_detect\": True,\n }\n )\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n \"\"\"attach floor loss\"\"\"\n if (\n \"object_room_floor_attach\" in self.optimize_option\n and obj_relation.get(\"attach_floor\", False)\n ):\n # # TODO(ybbbbt): hard code floor\n # loss_dict.update(\n # obj_attach_floor_loss(optimized_meta, floor=0.0)\n # )\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n \"face_direction\": torch.Tensor([0, 0, -1]),\n \"ray_grid_stretch\": torch.Tensor(\n [0.8, 0.8, 1.0]\n ), # avoid too close to wall\n \"use_bbox_surface_as_in_detect\": True,\n \"ray_grid_size\": 3,\n }\n if obj_type == \"door\":\n # kwargs[\"ray_grid_offset\"] = torch.Tensor(\n # [0, -0.3, 0]\n # ) # to avoid to close to wall\n assert (\n \"attach_wall_face_dir\" in obj_relation\n ), f\"door {obj_id} relation prediction failed.\"\n kwargs[\"ray_grid_offset\"] = (\n obj_relation[\"attach_wall_face_dir\"] * -0.3\n ) # to avoid to close to wall\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n # use_sphere_tracing=True,\n use_sphere_tracing=False,\n **extra_dict,\n )\n pred_rgb = results[\"rgb_fine\"]\n\n if \"photometric_loss\" in self.optimize_option:\n loss_dict[\"mse_loss\"] = mse_loss(pred_rgb, select_input_rgb).mean()\n\n if \"visualize_pred\" in self.optimize_option: # dump image for debug\n # pred_rgb_full = input_rgb.cuda()\n pred_rgb_full = torch.zeros_like(input_rgb.cuda())\n pred_rgb_full[select_ind] = pred_rgb\n\n imageio.imwrite(\n f\"debug/pred_rgb_full.png\",\n (pred_rgb_full * 255)\n .view(self.img_wh[1], self.img_wh[0], 3)\n .detach()\n .cpu()\n .numpy()\n .astype(np.uint8),\n )\n\n if \"perceptual_loss\" in self.optimize_option:\n pred_rgb_full = input_rgb.cuda()\n pred_rgb_full[select_ind] = pred_rgb\n loss_dict.update(\n patch_perceptual_loss(\n perceptual_net,\n pred_rgb_full,\n input_rgb,\n all_obj_info,\n self.instance_mask,\n self.img_wh,\n )\n )\n\n \"\"\"attach bottom to other object 
loss\"\"\"\n if \"object_object_attach\" in self.optimize_option:\n for obj_id_str, obj_relation in self.relation_info.items():\n if obj_relation.get(\"attach_bottom_to_object\", False):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info_src\": all_obj_info[obj_id_str],\n \"obj_info_tgt\": all_obj_info[\n str(obj_relation[\"attach_tgt_obj_id\"])\n ],\n \"face_direction\": torch.Tensor([0, 0, -1]),\n }\n loss_dict.update(object_object_attach_loss(**kwargs))\n\n # physical violation loss\n if \"physical_violation\" in self.optimize_option:\n if (\n not \"physical_violation_delayed_start\" in self.optimize_option\n or i_step >= 100\n ):\n loss_dict.update(\n physical_violation_loss(\n self,\n all_obj_info,\n N_nearest_obj=3,\n check_background_violation=True,\n # N_sample_points=1000,\n N_sample_points=2000,\n # N_sample_points=300,\n )\n )\n\n if \"viewing_constraint\" in self.optimize_option:\n loss_dict.update(viewing_constraint_loss(self, Twc, all_obj_info))\n\n if \"print_loss_dict\" in self.optimize_option:\n for k, v in loss_dict.items():\n # if \"_62\" not in k:\n # continue\n print(k, \"=\", float(v))\n loss = sum(list(loss_dict.values()))\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n t.set_description(\"Loss: %f\" % float(loss))\n t.refresh()\n # dump image\n if i_step % 20 == 0:\n self.save_optimizable_parameters(\n f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n )\n # self.load_optimizable_parameters(\n # f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n # )\n if i_step >= self.N_optim_step - 20:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n write_idx_on_image=False,\n render_mask=True,\n h=512,\n w=1280,\n )\n else:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n render_mask=False,\n h=self.h,\n w=self.w,\n )\n dump_optimization_meta_to_file(\n filepath=f\"{self.output_path}/{i_step:06d}.optim.json\",\n obj_pose_dict=self.object_pose_dict,\n )" }, { "identifier": "read_real_scene_localization", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization(pose_path: str, transform_info_json_path: str):\n pose_dict = {}\n transform_info = read_json(transform_info_json_path)\n trans_colmap_to_arkit = np.array(transform_info[\"transform_colmap_to_arkit_sRT\"])\n trans_align = np.array(transform_info[\"transform_alignment\"])\n with open(pose_path) as file:\n lines = file.readlines()\n lines = lines[1:]\n for line in lines:\n fname, tx, ty, tz, qx, qy, qz, qw, _, _ = line.strip().split(\" \")\n fname += \".png\"\n pose = np.eye(4)\n pose[0, 3] = tx\n pose[1, 3] = ty\n pose[2, 3] = tz\n # Twc\n pose[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()\n # pose = np.linalg.inv(pose)\n # pose_ndc = np.linalg.inv(pose_ndc)\n\n # convert to ndc\n # pose_ndc = pose\n # fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n # pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot\n\n # transform to arkit pose\n s, R, t = decompose_to_sRT(trans_colmap_to_arkit)\n # pose_ndc = transform_colmap_to_arkit @ pose_ndc\n # print(s, R, t)\n pose[:3, 3] = R @ (pose[:3, 3] * s) + t\n pose[:3, :3] = R @ pose[:3, :3]\n\n # apply alignment to poses\n pose = trans_align @ pose\n\n pose_dict[fname] = {\"pose_slam_Twc\": pose}\n # print(fname, pose)\n return pose_dict" }, { "identifier": "read_real_scene_localization_with_name", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization_with_name(arrangement_name):\n localization_info = read_real_scene_localization(\n 
f\"data/real_room_0/arrangement_panorama_select/{arrangement_name}/traj.txt\",\n \"data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json\",\n )\n return localization_info" }, { "identifier": "read_testing_config", "path": "optim/misc_utils.py", "snippet": "def read_testing_config():\n conf_cli = OmegaConf.from_cli()\n conf_test_file = OmegaConf.load(conf_cli.config)\n # read dataset config\n conf_test_file[\"dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"dataset_config_path\"]\n )\n conf_test_file[\"bg_dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"bg_dataset_config_path\"]\n )\n\n # processing ckpt\n ckpt_path_dict = {}\n for item in conf_test_file[\"ckpt_lists\"]:\n path = item[\"path\"]\n obj_ids = item[\"obj_ids\"]\n neus_conf = item.get(\"neus_conf\", \"config/neus.yaml\")\n for obj_id in obj_ids:\n ckpt_path_dict[str(obj_id)] = {\"path\": path, \"neus_conf\": neus_conf}\n conf_test_file[\"ckpt_path_dict\"] = ckpt_path_dict\n\n conf_merged = OmegaConf.merge(conf_test_file, conf_cli)\n return conf_merged" } ]
import sys import os import torch import numpy as np from PIL import Image from omegaconf import OmegaConf from optim.room_optimizer import RoomOptimizer from optim.misc_utils import ( read_real_scene_localization, read_real_scene_localization_with_name, read_testing_config, )
14,879
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): active_instance_id = config.active_instance_id image_path = config.test_image_path dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=config.scene_info_json, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, optim_batch_size=1024, # optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) room_optimizer.set_sampling_mask_from_seg( # seg_mask=torch.ones_like(input_rgb[:, :, 0]).numpy() * 31, seg_mask_path=config.seg_mask_path, # add_noise_to_seg=0, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, # convert_seg_mask_to_box_mask=False, ) if "obj_prediction_json" in config: room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) else: room_optimizer.set_initial_object_poses_from_scene_meta() # dump config config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.yaml"), ) room_optimizer.generate_relation()
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): active_instance_id = config.active_instance_id image_path = config.test_image_path dataset_config = config.dataset_config["dataset"] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] img_wh = config.img_wh # read image input_rgb = Image.open(image_path) input_rgb = input_rgb.resize(img_wh, Image.LANCZOS) input_rgb = np.array(input_rgb) input_rgb = torch.from_numpy(input_rgb).float() / 255 # (H, W, 3) # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=config.scene_info_json, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, N_samples=64, N_importance=128, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, # relation_info=relation_info, relation_info={}, output_path="debug", prefix=config.prefix, active_instance_id=active_instance_id, lr=1e-2, # lr=5e-2, N_optim_step=500, adjust_lr_per_step=0, optim_batch_size=1024, # optim_batch_size=2048, # optim_batch_size=4096, # use_amp=False, use_amp=True, optimize_light_env=True, optimize_appearance_code=config.get("optimize_appearance_code", False), mask_per_object=False, bbox_ray_intersect=True, bbox_enlarge=0.1, optimize_option=[ "keypoint_mask", "photometric_loss", # "perceptual_loss", "z_axis_align_loss", "object_room_wall_attach", "object_room_floor_attach", "physical_violation", # "physical_violation_delayed_start", "object_object_attach", "viewing_constraint", "optimize_exposure", "regenerate_relation_during_test", # "visualize_pred", # "print_loss_dict", ], ) # room_optimizer.set_sampling_mask_from_seg( # seg_mask=None, # seg_mask_path=config.seg_mask_path, # # add_noise_to_seg=0, # add_noise_to_seg=5, # dilate mask # convert_seg_mask_to_box_mask=True, # # convert_seg_mask_to_box_mask=False, # ) room_optimizer.set_sampling_mask_from_seg( # seg_mask=torch.ones_like(input_rgb[:, :, 0]).numpy() * 31, seg_mask_path=config.seg_mask_path, # add_noise_to_seg=0, add_noise_to_seg=5, # dilate mask convert_seg_mask_to_box_mask=False, # convert_seg_mask_to_box_mask=False, ) if "obj_prediction_json" in config: room_optimizer.set_initial_pose_from_prediction(config["obj_prediction_json"]) else: room_optimizer.set_initial_object_poses_from_scene_meta() # dump config config["optimize_option"] = room_optimizer.optimize_option OmegaConf.save( config=config, f=os.path.join(room_optimizer.output_path, "optim_config_full.yaml"), ) room_optimizer.generate_relation()
real_room_loc = read_real_scene_localization_with_name("arrangement3")
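This next line calls read_real_scene_localization_with_name; the partial snippet at the head of this record suggests it is a thin wrapper that fixes the real_room_0 paths for a given arrangement. A sketch consistent with that snippet (the def line is inferred, the two paths are verbatim):

def read_real_scene_localization_with_name(arrangement_name):
    # resolve the per-arrangement camera trajectory and the shared
    # background transform, then delegate to read_real_scene_localization
    return read_real_scene_localization(
        f"data/real_room_0/arrangement_panorama_select/{arrangement_name}/traj.txt",
        "data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json",
    )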
2
2023-10-15 08:41:29+00:00
24k
WenzhengZhang/Seq2seqCoref
main_trainer.py
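The context below defines three argument dataclasses (DataArguments, ModelArguments, CorefTrainingArguments). A minimal sketch of how a main_trainer.py entry point typically wires them together, assuming they are importable from arguments.py and that the standard transformers CLI pattern applies (this is not the repository's verified code):

from transformers import HfArgumentParser

from arguments import DataArguments, ModelArguments, CorefTrainingArguments

def parse_args():
    # HfArgumentParser exposes every dataclass field as a CLI flag, so
    # `python main_trainer.py --model_name_or_path t5-base --data_dir ...`
    # populates all three dataclasses in one pass
    parser = HfArgumentParser(
        (ModelArguments, DataArguments, CorefTrainingArguments))
    return parser.parse_args_into_dataclasses()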
[ { "identifier": "DataArguments", "path": "arguments.py", "snippet": "class DataArguments:\n data_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to data directory\"}\n )\n\n max_train_len: Optional[int] = field(\n default=1536,\n metadata={\n \"help\": \"maximum train source input length\"\n },\n )\n max_train_len_out: Optional[int] = field(\n default=2048,\n metadata={\n \"help\": \"maximum train target decoder length\"\n },\n )\n max_eval_len: Optional[int] = field(\n default=1536,\n metadata={\n \"help\": \"maximum dev/test source input length\"\n },\n )\n max_eval_len_out: Optional[int] = field(\n default=2048,\n metadata={\n \"help\": \"maximum dev/test target decode length\"\n },\n )\n\n data_cache_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Where do you want to store the data downloaded from huggingface\"}\n )\n\n beam_sz: Optional[int] = field(\n default=4, metadata={\n \"help\": \"num beams\"\n }\n )\n\n oracle_mentions_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"oracle mentions directory\"\n }\n )\n language: Optional[str] = field(\n default='english', metadata={\n \"help\": \"coreference language\"\n }\n )\n joint_data_dirs: Optional[str] = field(\n default=None, metadata={\"help\": \"datasets dirs for joint training\"}\n )\n joint_max_train_lens: Optional[str] = field(\n default=None, metadata={\"help\": \"max train len for each dataset for \"\n \"joint training\"}\n )\n joint_max_eval_lens: Optional[str] = field(\n default=None, metadata={\"help\": \"max eval len for each dataset for \"\n \"joint training\"}\n )\n joint_num_samples: Optional[int] = field(\n default=2000, metadata={\"help\": \"num samples to subsample for joint \"\n \"training\"}\n )" }, { "identifier": "ModelArguments", "path": "arguments.py", "snippet": "class ModelArguments:\n model_name_or_path: str = field(\n default=\"t5-base\",\n metadata={\n \"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n\n config_name: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n decay_rate: Optional[float] = field(\n default=0.6, metadata={\"help\": \"Decay learning rate\"}\n )\n low_cpu_mem_usage: Optional[bool] = field(\n default=False, metadata={\"help\": \"low cpu mem usage when load model\"}\n )" }, { "identifier": "CorefTrainingArguments", "path": "arguments.py", "snippet": "class CorefTrainingArguments(Seq2SeqTrainingArguments):\n do_train: bool = field(default=True,\n metadata={\"help\": \"Whether to run training.\"})\n save_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to save predicts directory\"}\n )\n save_predicts: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to save predictions\"}\n )\n mark_sentence: Optional[bool] = field(\n default=False, metadata={\"help\": \"mark sentence end for short target?\"}\n )\n align_mode: Optional[str] = field(\n default='l', metadata={\"help\": \"alignment mode: highroad (h) or \"\n \"lowroad (l) \"}\n )\n optim: Union[OptimizerNames, str] = field(\n default=\"adamw_apex_fused\",\n metadata={\"help\": \"The optimizer to use.\"},\n )\n 
parallelize_model: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to enable naive model \"\n \"parallel\"}\n )\n manual_empty_cache: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to empty cuda cache manually\"}\n )\n is_stage3: Optional[bool] = field(\n default=False, metadata={\"help\": \"use deepspeed stage3 for inference \"\n \"if is stage3\"}\n )\n val_after_train: Optional[bool] = field(\n default=False, metadata={\"help\": \"save the checkpoints then do \"\n \"validation after training\"}\n )\n allow_singletons: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"whether to allow singletons\"\n }\n )\n seq2seq_type: Optional[str] = field(\n default='action', metadata={\n \"help\": \"seq2seq type: action, short_seq, full_seq, tagging, \"\n \"input_feed, action_non_int\"\n }\n )\n action_type: Optional[str] = field(\n default='integer', metadata={\n \"help\": \"target action type: integer, non_integer\"\n }\n )\n do_oracle: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"do oracle experiments or not. Provide (gold) mentions \"\n \"and ask the model to predict coreference predictions\"\n }\n )\n add_mention_end: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"add mention end token when using non-integer action format\"\n }\n )\n joint_data_names: Optional[str] = field(\n default=None, metadata={\"help\": \"datasets names for joint training\"}\n )\n joint_min_num_mentions: Optional[str] = field(\n default=None, metadata={\"help\": \"threshold for num mentions per epoch \"\n \"in joint training for each dataset\"}\n )\n min_num_mentions: Optional[int] = field(\n default=2, metadata={\"help\": \"minimum number of mentions per cluster,\"\n \"ontonotes is 2 other datasets is 1 \"\n \"(allow singletons)\"}\n )\n joint_train: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to use joint training\"}\n )" }, { "identifier": "CorefDataset", "path": "data.py", "snippet": "class CorefDataset(Dataset):\n\n def __init__(self, tokenizer,\n data_args, train_args, split):\n self.tokenizer = tokenizer\n self.data_args = data_args\n self.train_args = train_args\n self.split = split\n # self.task_prefix = self.data_args.task_prefix\n # convert tokens to ids for each sample\n self.samples, self.doc_labels = self.load_dataset()\n\n def __len__(self):\n return len(self.samples)\n\n def load_dataset(self):\n max_len = self.data_args.max_train_len if self.split == 'train' else \\\n self.data_args.max_eval_len\n data_path = os.path.join(\n self.data_args.data_dir,\n f'{self.split}.t5-small.english.{max_len}.jsonlines')\n samples = []\n doc_labels = {}\n thred = self.train_args.min_num_mentions\n with open(data_path, 'r') as f:\n for line in f:\n item = json.loads(line)\n doc_key = item['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n if self.train_args.action_type == \"integer\":\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item['target_sentence'])\n elif self.train_args.action_type == \"non_integer\":\n if self.train_args.add_mention_end:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_sentence\"])\n else:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_sentence\"])\n else:\n raise ValueError(f\"wrong action type \"\n f\"{self.train_args.action_type}\")\n\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n if self.train_args.action_type == 'integer':\n 
target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n elif self.train_args.action_type == 'non_integer':\n if self.train_args.add_mention_end:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_action\"])\n else:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_action\"])\n else:\n raise ValueError(\"wrong action type (\"\n \"integer/non_integer)\")\n elif self.train_args.seq2seq_type == 'short_seq':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_short_sentence'])\n elif self.train_args.seq2seq_type == 'full_seq':\n target_seq = deepcopy(target_sent)\n elif self.train_args.seq2seq_type == 'tagging':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n # set the last token as eos token\n target_seq[-1] = self.tokenizer.eos_token_id\n else:\n raise ValueError('wrong seq2seq type')\n sample = {'doc_key': doc_key,\n 'sentence': self.tokenizer.convert_tokens_to_ids(\n item['sentence']),\n 'target_sentence': target_sent,\n 'target_seq': target_seq,\n 'subtoken_map': item['subtoken_map'],\n 'seg_clusters': [[tuple(m) for m in c] for c in item[\n 'seg_clusters'] if len(c) >= thred],\n 'offset': item['offset']\n }\n doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[\n 'gold_clusters']]\n samples.append(sample)\n return samples, doc_labels\n\n def __getitem__(self, index):\n sample = self.samples[index]\n input_ids = torch.tensor(sample['sentence'], dtype=torch.long)\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n label_ids = torch.tensor(sample['target_sentence'],\n dtype=torch.long)\n target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'decoder_labels': label_ids,\n 'labels': target_ids\n }\n else:\n label_ids = torch.tensor(sample['target_seq'],\n dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': label_ids,\n }\n return src_encoding" }, { "identifier": "JointDataset", "path": "data.py", "snippet": "class JointDataset(Dataset):\n\n def __init__(self, tokenizer,\n data_args, train_args, split):\n self.tokenizer = tokenizer\n self.data_args = data_args\n self.train_args = train_args\n self.split = split\n self.all_samples, self.doc_labels, self.id_to_name = self.load_dataset()\n self.samples = None if self.split == 'train' else [\n s for data_samples in self.all_samples.values() for s in\n data_samples\n ]\n\n def __len__(self):\n if self.split == 'train':\n num_samples = 0\n for s in self.all_samples.values():\n num_samples += min(self.data_args.joint_num_samples, len(s))\n else:\n num_samples = len(self.samples)\n return num_samples\n\n def set_samples(self, epoch):\n # subsample larger datasets and then concat them\n sample_seed = self.train_args.seed + epoch\n min_num_samples = min(len(s) for s in self.all_samples.values())\n samples = []\n for data_name, data_samples in self.all_samples.items():\n if len(data_samples) > min_num_samples:\n subsamples = random.Random(sample_seed).sample(\n data_samples, self.data_args.joint_num_samples)\n else:\n subsamples = data_samples\n samples += subsamples\n 
self.samples = samples\n\n def _load_single_data(self, data_dir,\n data_name,\n max_len,\n thred):\n\n samples = []\n doc_labels = {}\n id_to_name = {}\n data_path = os.path.join(\n data_dir,\n f'{self.split}.t5-small.english.{max_len}.jsonlines')\n with open(data_path, 'r') as f:\n for line in f:\n item = json.loads(line)\n doc_key = item['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n id_to_name[doc_id] = data_name\n if self.train_args.action_type == \"integer\":\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item['target_sentence'])\n elif self.train_args.action_type == \"non_integer\":\n if self.train_args.add_mention_end:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_sentence\"])\n else:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_sentence\"])\n else:\n raise ValueError(f\"wrong action type \"\n f\"{self.train_args.action_type}\")\n\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n if self.train_args.action_type == 'integer':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n elif self.train_args.action_type == 'non_integer':\n if self.train_args.add_mention_end:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_action\"])\n else:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_action\"])\n else:\n raise ValueError(\"wrong action type (\"\n \"integer/non_integer)\")\n elif self.train_args.seq2seq_type == 'short_seq':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_short_sentence'])\n elif self.train_args.seq2seq_type == 'full_seq':\n target_seq = deepcopy(target_sent)\n elif self.train_args.seq2seq_type == 'tagging':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n # set the last token as eos token\n target_seq[-1] = self.tokenizer.eos_token_id\n else:\n raise ValueError('wrong seq2seq type')\n sample = {'doc_key': doc_key,\n 'sentence': self.tokenizer.convert_tokens_to_ids(\n item['sentence']),\n 'target_sentence': target_sent,\n 'target_seq': target_seq,\n 'subtoken_map': item['subtoken_map'],\n 'seg_clusters': [[tuple(m) for m in c] for c in item[\n 'seg_clusters'] if len(c) >= thred],\n 'offset': item['offset']\n }\n doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[\n 'gold_clusters']]\n samples.append(sample)\n return samples, doc_labels, id_to_name\n\n def load_dataset(self):\n doc_labels = {}\n id_to_name = {}\n samples = {}\n max_lens = self.data_args.joint_max_train_lens.split(\n ',') if self.split == 'train' else \\\n self.data_args.joint_max_eval_lens.split(',')\n max_lens = [int(l) for l in max_lens]\n threds = self.train_args.joint_min_num_mentions.split(',')\n threds = [int(t) for t in threds]\n data_dirs = self.data_args.joint_data_dirs.split(',')\n data_names = self.train_args.joint_data_names.split(',')\n for data_dir, data_name, max_len, thred in zip(\n data_dirs, data_names, max_lens, threds):\n single_samples, single_doc_labels, single_id_to_name = \\\n self._load_single_data(data_dir, data_name, max_len, thred)\n samples[data_name] = single_samples\n doc_labels.update(single_doc_labels)\n id_to_name.update(single_id_to_name)\n return samples, doc_labels, id_to_name\n\n def __getitem__(self, index):\n sample = self.samples[index]\n input_ids = torch.tensor(sample['sentence'], dtype=torch.long)\n if self.train_args.seq2seq_type == 'action' or \\\n 
self.train_args.seq2seq_type == 'input_feed':\n label_ids = torch.tensor(sample['target_sentence'],\n dtype=torch.long)\n target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'decoder_labels': label_ids,\n 'labels': target_ids\n }\n else:\n label_ids = torch.tensor(sample['target_seq'],\n dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': label_ids,\n }\n return src_encoding" }, { "identifier": "SPEAKER_START", "path": "constants.py", "snippet": "SPEAKER_START = '<speaker>'" }, { "identifier": "SPEAKER_END", "path": "constants.py", "snippet": "SPEAKER_END = '</speaker>'" }, { "identifier": "MENTION_START", "path": "constants.py", "snippet": "MENTION_START = '<m>'" }, { "identifier": "MENTION_END", "path": "constants.py", "snippet": "MENTION_END = '</m>'" }, { "identifier": "COPY", "path": "constants.py", "snippet": "COPY = '<copy>'" }, { "identifier": "CLUSTER_NEW", "path": "constants.py", "snippet": "CLUSTER_NEW = '</new>'" }, { "identifier": "CLUSTERS", "path": "constants.py", "snippet": "CLUSTERS = []" }, { "identifier": "SENTENCE_START", "path": "constants.py", "snippet": "SENTENCE_START = '<sentence>'" }, { "identifier": "SENTENCE_END", "path": "constants.py", "snippet": "SENTENCE_END = '</sentence>'" }, { "identifier": "SPECIAL_IDS", "path": "constants.py", "snippet": "SPECIAL_IDS = {\n 'speaker_start': int_tokenizer.encode(SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end': int_tokenizer.encode(SPEAKER_END, add_special_tokens=False)[\n 0],\n 'mention_start': int_tokenizer.encode(MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': int_tokenizer.encode(MENTION_END, add_special_tokens=False)[\n 0],\n 'sep': int_tokenizer.encode(SEP_TOKEN, add_special_tokens=False)[0],\n 'copy': int_tokenizer.encode(COPY, add_special_tokens=False)[0],\n 'eos': int_tokenizer.eos_token_id\n}" }, { "identifier": "NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "NON_INT_SPECIAL_IDS = {\n 'speaker_start': non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'cluster_ids': MENTION_ENDS_IDS,\n 'cluster_ids_to_num': END_IDS_TO_NUM,\n 'cluster_new': non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MARK_SPECIAL_IDS", "path": "constants.py", "snippet": "MARK_SPECIAL_IDS = deepcopy(SPECIAL_IDS)" }, { "identifier": "MENTION_END_NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "MENTION_END_NON_INT_SPECIAL_IDS = {\n 'speaker_start': mention_end_non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n mention_end_non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': mention_end_non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': mention_end_non_int_tokenizer.encode(\n MENTION_END,\n 
add_special_tokens=False)[0],\n 'cluster_ids': CLUSTER_IDS,\n 'cluster_ids_to_num': CLUSTER_IDS_TO_NUM,\n 'cluster_new': mention_end_non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': mention_end_non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': mention_end_non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MENTION_ENDS", "path": "constants.py", "snippet": "MENTION_ENDS = []" }, { "identifier": "CorefTrainer", "path": "trainer.py", "snippet": "class CorefTrainer(Seq2SeqTrainer):\n\n def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime,\n output_dir=output_dir)\n if self.args.val_after_train and self.args.eval_delay < \\\n self.state.global_step:\n for checkpoint in checkpoints_sorted[:-1]:\n states_dir = [str(x) for x in Path(\n checkpoint).glob(f'global_step*') if os.path.isdir(x)]\n for state_dir in states_dir:\n logger.info(f\"Deleting optimizer states of saved \"\n f\"checkpoint {checkpoint}\")\n if os.path.exists(state_dir) and os.path.isdir(\n state_dir):\n shutil.rmtree(state_dir)\n else:\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which\n # we don't do to allow resuming.\n save_total_limit = self.args.save_total_limit\n if (\n self.state.best_model_checkpoint is not None\n and self.args.save_total_limit == 1\n and checkpoints_sorted[\n -1] != self.state.best_model_checkpoint\n ):\n save_total_limit = 2\n\n number_of_checkpoints_to_delete = max(0, len(\n checkpoints_sorted) - save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[\n :number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\n f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n # If we are executing this function, we are the process zero, so we don't check for that.\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving model checkpoint to {output_dir}\")\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel) and not hasattr(\n self.model, 'save_pretrained'):\n if state_dict is None:\n state_dict = self.model.state_dict()\n\n if isinstance(unwrap_model(self.model), PreTrainedModel):\n unwrap_model(self.model).save_pretrained(\n output_dir, state_dict=state_dict,\n # safe_serialization=self.args.save_safetensors\n )\n else:\n logger.info(\n \"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n # if self.args.save_safetensors:\n # safetensors.torch.save_file(state_dict,\n # os.path.join(output_dir,\n # SAFE_WEIGHTS_NAME))\n # else:\n torch.save(state_dict, os.path.join(output_dir,\n WEIGHTS_NAME))\n else:\n self.model.save_pretrained(\n output_dir, state_dict=state_dict,\n # safe_serialization=self.args.save_safetensors\n )\n\n if self.tokenizer is not None:\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, 
os.path.join(output_dir, TRAINING_ARGS_NAME))\n\n def _inner_training_loop(\n self, batch_size=None, args=None, resume_from_checkpoint=None,\n trial=None, ignore_keys_for_eval=None\n ):\n self._train_batch_size = batch_size\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size\n\n len_dataloader = None\n if has_length(train_dataloader):\n len_dataloader = len(train_dataloader)\n num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n num_examples = self.num_examples(train_dataloader)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's\n # the best we can do.\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n max_steps = math.ceil(\n args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n num_train_samples = self.num_examples(\n train_dataloader) * args.num_train_epochs\n elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size\n max_steps = args.max_steps\n # Setting a very large number of epochs so we go as many times as necessary over the iterator.\n num_train_epochs = sys.maxsize\n num_update_steps_per_epoch = max_steps\n num_examples = total_train_batch_size * args.max_steps\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n raise ValueError(\n \"args.max_steps must be set to a positive value if dataloader does not have a length, was\"\n f\" {args.max_steps}\"\n )\n\n if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:\n if self.args.n_gpu > 1:\n # nn.DataParallel(model) replicates the model, creating new variables and module\n # references registered here no longer work on other gpus, breaking the module\n raise ValueError(\n \"Currently --debug underflow_overflow is not supported under DP. 
Please use DDP\"\n \" (torch.distributed.launch).\"\n )\n else:\n debug_overflow = DebugUnderflowOverflow(self.model) # noqa\n\n delay_optimizer_creation = (\n self.sharded_ddp is not None\n and self.sharded_ddp != ShardedDDPOption.SIMPLE\n or is_sagemaker_mp_enabled()\n or self.fsdp is not None\n )\n if args.deepspeed:\n deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(\n self, num_training_steps=max_steps,\n resume_from_checkpoint=resume_from_checkpoint\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n elif not delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Activate gradient checkpointing if needed\n if args.gradient_checkpointing:\n self.model.gradient_checkpointing_enable()\n\n model = self._wrap_model(self.model_wrapped)\n\n if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:\n self._load_from_checkpoint(resume_from_checkpoint, model)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n if delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(\n f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(\n f\" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(\n f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n logger.info(\n f\" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}\"\n )\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)\n ):\n self.state = TrainerState.load_from_json(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (\n num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\n \" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(\n f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(\n total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\n \"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n if self.hp_name is not None and self._trial is not None:\n # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial\n # parameter to Train when using DDP.\n self.state.trial_name = self.hp_name(self._trial)\n if trial is not None:\n assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial\n self.state.trial_params = hp_params(assignments)\n else:\n self.state.trial_params = None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state,\n self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n is_random_sampler = hasattr(train_dataloader,\n \"sampler\") and isinstance(\n train_dataloader.sampler, RandomSampler\n )\n if is_torch_less_than_1_11 or not is_random_sampler:\n # We just need to begin an iteration to create the randomization of the sampler.\n # That was before PyTorch 1.11 however...\n if self.args.joint_train:\n train_dataloader.dataset.set_samples(epoch)\n for _ in train_dataloader:\n break\n else:\n # Otherwise we need to call the whooooole sampler cause there is some random operation added\n # AT THE VERY END!\n _ = list(train_dataloader.sampler)\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n for epoch in range(epochs_trained, num_train_epochs):\n if self.args.joint_train:\n train_dataloader.dataset.set_samples(epoch)\n if isinstance(train_dataloader, DataLoader) and isinstance(\n train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n elif hasattr(train_dataloader, \"dataset\") and isinstance(\n train_dataloader.dataset, IterableDatasetShard):\n train_dataloader.dataset.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [\n args.device]).per_device_loader(args.device)\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (\n len(epoch_iterator)\n if 
len_dataloader is not None\n else args.max_steps * args.gradient_accumulation_steps\n )\n self.control = self.callback_handler.on_epoch_begin(args,\n self.state,\n self.control)\n\n if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n\n step = -1\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args,\n self.state,\n self.control)\n # if args.manual_empty_cache:\n # torch.cuda.empty_cache()\n if (\n ((step + 1) % args.gradient_accumulation_steps != 0)\n and args.local_rank != -1\n and args._no_sync_in_gradient_accumulation\n ):\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss_step = self.training_step(model, inputs)\n else:\n tr_loss_step = self.training_step(model, inputs)\n\n if (\n args.logging_nan_inf_filter\n and not is_torch_tpu_available()\n and (\n torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))\n ):\n # if loss is nan or inf simply add the average of previous logged losses\n tr_loss += tr_loss / (\n 1 + self.state.global_step - self._globalstep_last_logged)\n else:\n tr_loss += tr_loss_step\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if self.deepspeed:\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n self.deepspeed.step()\n\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.do_grad_scaling:\n # Reduce gradients first for XLA\n if is_torch_tpu_available():\n gradients = xm._fetch_gradients(self.optimizer)\n xm.all_reduce(\"sum\", gradients,\n scale=1.0 / xm.xrt_world_size())\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if is_sagemaker_mp_enabled() and args.fp16:\n self.optimizer.clip_master_grads(args.max_grad_norm)\n elif hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(args.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n # Some models (like FullyShardedDDP) have a specific way to do gradient clipping\n model.clip_grad_norm_(args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n nn.utils.clip_grad_norm_(\n amp.master_params(\n self.optimizer) if self.use_apex else model.parameters(),\n args.max_grad_norm,\n )\n\n # Optimizer step\n 
optimizer_was_run = True\n if self.deepspeed:\n pass # called outside the loop\n elif is_torch_tpu_available():\n if self.do_grad_scaling:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n xm.optimizer_step(self.optimizer)\n elif self.do_grad_scaling:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n optimizer_was_run = scale_before <= scale_after\n else:\n self.optimizer.step()\n\n if optimizer_was_run and not self.deepspeed:\n self.lr_scheduler.step()\n\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n self.control = self.callback_handler.on_step_end(args,\n self.state,\n self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch,\n ignore_keys_for_eval)\n else:\n self.control = self.callback_handler.on_substep_end(args,\n self.state,\n self.control)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n if step < 0:\n logger.warning(\n \"There seems to be not a single sample in your epoch_iterator, stopping training at step\"\n f\" {self.state.global_step}! This is expected if you're using an IterableDataset and set\"\n f\" num_steps ({max_steps}) higher than the number of available samples.\"\n )\n self.control.should_training_stop = True\n\n self.control = self.callback_handler.on_epoch_end(args, self.state,\n self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch,\n ignore_keys_for_eval)\n\n if DebugOption.TPU_METRICS_DEBUG in self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\n \"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n # Wait for everyone to get here so we are sur the model has been saved by process 0.\n if is_torch_tpu_available():\n xm.rendezvous(\"load_best_model_at_end\")\n elif args.local_rank != -1:\n dist.barrier()\n elif is_sagemaker_mp_enabled():\n smp.barrier()\n\n self._load_best_model()\n\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n train_loss = self._total_loss_scalar / self.state.global_step\n\n metrics = speed_metrics(\"train\", start_time,\n num_samples=num_train_samples,\n num_steps=self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n metrics[\"train_loss\"] = train_loss\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n self.log(metrics)\n\n run_dir = self._get_output_dir(trial)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=False,\n output_dir=run_dir)\n\n # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint.\n if self.state.best_model_checkpoint is not None and \\\n self.args.save_total_limit == 1 and self.is_world_process_zero():\n for checkpoint in checkpoints_sorted:\n if checkpoint != self.state.best_model_checkpoint:\n logger.info(\n f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n self.control = self.callback_handler.on_train_end(args, self.state,\n self.control)\n\n return TrainOutput(self.state.global_step, train_loss, metrics)\n\n def my_compute_metrics(self,\n doc_labels: Dict[str, List[List]],\n predicts: Any,\n samples: List,\n split: str,\n id_to_name: Dict = None\n ) -> Dict:\n if self.args.joint_train:\n data_names = self.args.joint_data_names.split(',')\n joint_threds = [\n int(t) for t in self.args.joint_min_num_mentions.split(',')]\n name_to_threds = {n: t for n, t in zip(data_names, joint_threds)}\n documents_to_chunk_data = defaultdict(list)\n documents_to_chunk_gold = defaultdict(list)\n predictions = {}\n golds = {}\n assert len(samples) == len(predicts)\n out_sents = []\n last_doc_id = re.sub(r'_\\d+$', '', samples[0]['doc_key'])\n for sample, predict in zip(samples, predicts):\n doc_key = sample['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n # require convert to ids first\n input_ids = sample['sentence']\n subtoken_map = sample['subtoken_map']\n offset = sample['offset']\n # remove bos\n predict_ids = predict[1:].tolist()\n gold_data = sample['seg_clusters']\n if self.args.joint_train:\n thred = name_to_threds[id_to_name[doc_id]]\n else:\n thred = self.args.min_num_mentions\n if self.args.seq2seq_type == \"short_seq\":\n special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \\\n else SPECIAL_IDS\n pred_data, aligned_input_ids, aligned_pred_ids = \\\n parse_short_target_tokens(input_ids, predict_ids,\n special_ids, subtoken_map,\n self.tokenizer,\n self.args.align_mode,\n thred,\n self.args.mark_sentence\n )\n pred_tokens = self.tokenizer.convert_ids_to_tokens(\n predict_ids)\n out_predict = {\n 'doc_key': doc_key,\n 'pred_tokens': pred_tokens,\n 'pred_text': self.tokenizer.convert_tokens_to_string(\n pred_tokens),\n 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens(\n aligned_pred_ids\n ),\n 'input_aligned_text': self.tokenizer.convert_ids_to_tokens(\n aligned_input_ids\n )\n }\n else:\n is_tagging = (self.args.seq2seq_type == 'tagging')\n if self.args.action_type == 
'integer':\n pred_data, pred_token_mentions, predict_ids = \\\n parse_int_output_tokens(\n input_ids,\n predict_ids,\n SPECIAL_IDS,\n subtoken_map,\n self.tokenizer,\n thred, is_tagging)\n else:\n special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \\\n self.args.add_mention_end else NON_INT_SPECIAL_IDS\n pred_data, pred_token_mentions, predict_ids = \\\n parse_nonint_output_tokens(\n input_ids,\n predict_ids,\n special_ids,\n subtoken_map,\n self.tokenizer, self.args.add_mention_end,\n thred)\n pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in\n pred_token_mentions]\n pred_tokens = self.tokenizer.convert_ids_to_tokens(\n predict_ids)\n out_predict = {'doc_key': doc_key,\n 'pred_tokens': pred_tokens,\n 'pred_text':\n self.tokenizer.convert_tokens_to_string(\n pred_tokens),\n 'predict_clusters': pred_data,\n 'gold_clusters': gold_data,\n 'predict_token_mentions': pred_token_mentions\n }\n # list of (m1,m2)\n\n documents_to_chunk_data[doc_id].extend(pred_data)\n documents_to_chunk_gold[doc_id].extend(gold_data)\n\n out_sents.append(out_predict)\n if doc_id != last_doc_id:\n predictions[last_doc_id] = get_document_predicts(\n documents_to_chunk_data[\n last_doc_id])\n golds[last_doc_id] = get_document_predicts(\n documents_to_chunk_gold[\n last_doc_id])\n last_doc_id = doc_id\n # final one\n predictions[last_doc_id] = get_document_predicts(\n documents_to_chunk_data[last_doc_id]\n )\n golds[last_doc_id] = get_document_predicts(\n documents_to_chunk_gold[last_doc_id]\n )\n # print(predictions)\n if self.args.joint_train:\n predictions_list = defaultdict(list)\n labels_list = defaultdict(list)\n golds_list = defaultdict(list)\n else:\n predictions_list = []\n labels_list = []\n golds_list = []\n for document_id, doc_label in doc_labels.items():\n if self.args.joint_train:\n predictions_list[id_to_name[document_id]].append(\n predictions[document_id])\n labels_list[id_to_name[document_id]].append(doc_label)\n golds_list[id_to_name[document_id]].append(golds[document_id])\n else:\n predictions_list.append(predictions[document_id])\n labels_list.append(doc_label)\n golds_list.append(golds[document_id])\n if self.args.joint_train:\n label_results = {}\n gold_results = {}\n for dn in predictions_list.keys():\n metrics = CorefAllMetrics().get_all_metrics(\n labels_list[dn],\n predictions_list[dn])\n metrics_golds = CorefAllMetrics().get_all_metrics(\n golds_list[dn],\n predictions_list[dn])\n single_label_results = {\n f'{dn}_{metric_name}_{x}': v\n for metric_name, metric_values in metrics['micro'].items()\n for x, v in metric_values.items()\n }\n single_gold_results = {\n f'{dn}_gold_{metric_name}_{x}': v\n for metric_name, metric_values in\n metrics_golds['micro'].items()\n for x, v in metric_values.items()\n }\n label_results.update(single_label_results)\n gold_results.update(single_gold_results)\n\n else:\n metrics = CorefAllMetrics().get_all_metrics(labels_list,\n predictions_list)\n metrics_golds = CorefAllMetrics().get_all_metrics(golds_list,\n predictions_list)\n label_results = {\n f'{metric_name}_{x}': v\n for metric_name, metric_values in metrics['micro'].items()\n for x, v in metric_values.items()\n }\n gold_results = {\n f'gold_{metric_name}_{x}': v\n for metric_name, metric_values in metrics_golds['micro'].items()\n for x, v in metric_values.items()\n }\n results = {**label_results, **gold_results}\n if self.args.joint_train:\n avg_f1s = [results[f\"{dname}_average_f1\"] for dname in\n data_names]\n results[\"average_f1\"] = sum(avg_f1s) / len(avg_f1s)\n if 
self.is_world_process_zero() and self.args.save_predicts:\n os.makedirs(self.args.save_dir, exist_ok=True)\n save_path = os.path.join(self.args.save_dir,\n f'{split}-predicts.txt')\n results_path = os.path.join(self.args.save_dir,\n f'{split}-results.json')\n with open(save_path, 'w') as f:\n for p in out_sents:\n f.write('%s\\n' % json.dumps(p))\n with open(results_path, 'w') as f:\n json.dump(results, f)\n\n return results\n\n def evaluation_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = False,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> EvalLoopOutput:\n \"\"\"\n Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.\n Works both with or without labels.\n \"\"\"\n args = self.args\n\n prediction_loss_only = False\n\n # if eval is called w/o train init deepspeed here\n if args.deepspeed and not self.deepspeed:\n # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval\n # from the checkpoint eventually\n deepspeed_engine, _, _ = deepspeed_init(\n self, num_training_steps=0, resume_from_checkpoint=None,\n inference=is_deepspeed_zero3_enabled()\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n if self.args.gradient_checkpointing:\n self.model.config.use_cache = True\n model = self._wrap_model(self.model, training=False,\n dataloader=dataloader)\n\n # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called\n # while ``train`` is running, cast it to the right dtype first and then put on device\n if not self.is_in_train:\n if args.fp16_full_eval:\n model = model.to(dtype=torch.float16, device=args.device)\n elif args.bf16_full_eval:\n model = model.to(dtype=torch.bfloat16, device=args.device)\n\n batch_size = self.args.eval_batch_size\n\n logger.info(f\"***** Running {description} *****\")\n if has_length(dataloader):\n logger.info(f\" Num examples = {self.num_examples(dataloader)}\")\n else:\n logger.info(\" Num examples: Unknown\")\n logger.info(f\" Batch size = {batch_size}\")\n\n model.eval()\n\n self.callback_handler.eval_dataloader = dataloader\n # Do this before wrapping.\n eval_dataset = getattr(dataloader, \"dataset\", None)\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader,\n [args.device]).per_device_loader(\n args.device)\n\n if args.past_index >= 0:\n self._past = None\n\n # Initialize containers\n # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)\n losses_host = None\n preds_host = None\n labels_host = None\n inputs_host = None\n\n # losses/preds/labels on CPU (final containers)\n all_losses = None\n all_preds = None\n all_labels = None\n all_inputs = None\n # Will be useful when we have an iterable dataset so don't know its length.\n\n observed_num_examples = 0\n # Main evaluation loop\n for step, inputs in enumerate(dataloader):\n # Update the observed num examples\n observed_batch_size = find_batch_size(inputs)\n if observed_batch_size is not None:\n observed_num_examples += observed_batch_size\n # For batch samplers, batch_size is not known by the dataloader in advance.\n if batch_size is None:\n batch_size = observed_batch_size\n\n # Prediction step\n loss, logits, labels = self.prediction_step(model, inputs,\n prediction_loss_only,\n ignore_keys=ignore_keys)\n inputs_decode = self._prepare_input(inputs[\n \"input_ids\"]) if args.include_inputs_for_metrics else 
None\n\n if is_torch_tpu_available():\n xm.mark_step()\n\n # Update containers on host\n if loss is not None:\n losses = self._nested_gather(loss.repeat(batch_size))\n losses_host = losses if losses_host is None else torch.cat(\n (losses_host, losses), dim=0)\n if labels is not None:\n labels = self._pad_across_processes(labels)\n labels = self._nested_gather(labels)\n labels_host = labels if labels_host is None else nested_concat(\n labels_host, labels, padding_index=-100)\n if inputs_decode is not None:\n inputs_decode = self._pad_across_processes(inputs_decode)\n inputs_decode = self._nested_gather(inputs_decode)\n inputs_host = (\n inputs_decode\n if inputs_host is None\n else nested_concat(inputs_host, inputs_decode,\n padding_index=-100)\n )\n if logits is not None:\n logits = self._pad_across_processes(logits)\n logits = self._nested_gather(logits)\n if self.preprocess_logits_for_metrics is not None:\n logits = self.preprocess_logits_for_metrics(logits, labels)\n preds_host = logits if preds_host is None else nested_concat(\n preds_host, logits, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(args,\n self.state,\n self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if args.eval_accumulation_steps is not None and (\n step + 1) % args.eval_accumulation_steps == 0:\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate(\n (all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(\n all_preds, logits, padding_index=-100)\n if inputs_host is not None:\n inputs_decode = nested_numpify(inputs_host)\n all_inputs = (\n inputs_decode\n if all_inputs is None\n else nested_concat(all_inputs, inputs_decode,\n padding_index=-100)\n )\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = (\n labels if all_labels is None else nested_concat(\n all_labels, labels, padding_index=-100)\n )\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, inputs_host, labels_host = None, None, None, None\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate(\n (all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(\n all_preds, logits, padding_index=-100)\n if inputs_host is not None:\n inputs_decode = nested_numpify(inputs_host)\n all_inputs = (\n inputs_decode if all_inputs is None else nested_concat(\n all_inputs, inputs_decode, padding_index=-100)\n )\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = labels if all_labels is None else nested_concat(\n all_labels, labels, padding_index=-100)\n\n # Number of samples\n if has_length(eval_dataset):\n num_samples = len(eval_dataset)\n # The instance check is weird and does not actually check for the type, but whether the dataset has the right\n # methods. 
Therefore we need to make sure it also has the attribute.\n elif isinstance(eval_dataset, IterableDatasetShard) and getattr(\n eval_dataset, \"num_examples\", 0) > 0:\n num_samples = eval_dataset.num_examples\n else:\n if has_length(dataloader):\n num_samples = self.num_examples(dataloader)\n else: # both len(dataloader.dataset) and len(dataloader) fail\n num_samples = observed_num_examples\n if num_samples == 0 and observed_num_examples > 0:\n num_samples = observed_num_examples\n\n # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of\n # samplers has been rounded to a multiple of batch_size, so we truncate.\n if all_losses is not None:\n all_losses = all_losses[:num_samples]\n if all_preds is not None:\n all_preds = nested_truncate(all_preds, num_samples)\n if all_labels is not None:\n all_labels = nested_truncate(all_labels, num_samples)\n if all_inputs is not None:\n all_inputs = nested_truncate(all_inputs, num_samples)\n\n # Metrics!\n doc_labels = eval_dataset.doc_labels\n eval_samples = eval_dataset.samples\n split = eval_dataset.split\n if self.args.joint_train:\n doc_id_to_name = eval_dataset.id_to_name\n else:\n doc_id_to_name = None\n # allow_singletons = eval_dataset.data_args.allow_singletons\n assert all_preds is not None\n metrics = self.my_compute_metrics(doc_labels, all_preds,\n eval_samples, split,\n doc_id_to_name)\n # if all_preds is not None and doc_labels is not None:\n # metrics = self.get_eval_metrics(doc_labels, all_preds,\n # eval_samples, split)\n # else:\n # metrics = {}\n\n # To be JSON-serializable, we need to remove numpy types or zero-d tensors\n metrics = denumpify_detensorize(metrics)\n\n if all_losses is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = all_losses.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n if self.args.gradient_checkpointing:\n self.model.config.use_cache = False\n return EvalLoopOutput(predictions=all_preds, label_ids=all_labels,\n metrics=metrics, num_samples=num_samples)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n ignore_keys:\n list of ignore keys\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(\n model, inputs, prediction_loss_only=prediction_loss_only,\n ignore_keys=ignore_keys\n )\n\n has_labels = \"labels\" in inputs\n inputs = self._prepare_inputs(inputs)\n\n # XXX: adapt synced_gpus for fairscale as well\n gen_kwargs = self._gen_kwargs.copy()\n gen_kwargs[\"max_length\"] = (\n gen_kwargs[\"max_length\"] if gen_kwargs.get(\n \"max_length\") is not None else self.model.config.max_length\n )\n gen_kwargs[\"num_beams\"] = (\n gen_kwargs[\"num_beams\"] if gen_kwargs.get(\n \"num_beams\") is not None else self.model.config.num_beams\n )\n default_synced_gpus = True if is_deepspeed_zero3_enabled() else False\n gen_kwargs[\"synced_gpus\"] = (\n gen_kwargs[\"synced_gpus\"] if gen_kwargs.get(\n \"synced_gpus\") is not None else default_synced_gpus\n )\n\n if \"attention_mask\" in inputs:\n gen_kwargs[\"attention_mask\"] = inputs.get(\"attention_mask\", None)\n if \"global_attention_mask\" in inputs:\n gen_kwargs[\"global_attention_mask\"] = inputs.get(\n \"global_attention_mask\", None)\n\n # prepare generation inputs\n # some encoder-decoder models can have varying encoder's and thus\n # varying model input names\n if hasattr(self.model,\n \"encoder\") and self.model.encoder.main_input_name != self.model.main_input_name:\n generation_inputs = inputs[self.model.encoder.main_input_name]\n else:\n generation_inputs = inputs[self.model.main_input_name]\n # add our logits_processor here\n if self.args.seq2seq_type != 'short_seq':\n if self.args.action_type == 'non_integer':\n special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \\\n self.args.add_mention_end else NON_INT_SPECIAL_IDS\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [NonIntProcessor(generation_inputs, special_ids,\n self.args.seq2seq_type,\n self.args.add_mention_end)])\n else:\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [IntProcessor(generation_inputs, SPECIAL_IDS,\n self.args.seq2seq_type)])\n elif self.args.mark_sentence:\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [ShortSeqProcessor(generation_inputs, MARK_SPECIAL_IDS)])\n # if self.args.use_peft:\n # gen_kwargs[\"input_ids\"] = generation_inputs\n # gen_kwargs[\"use_cache\"] = True\n # generated_tokens = self.model.generate(\n # **gen_kwargs,\n # )\n # else:\n generated_tokens = self.model.generate(\n generation_inputs,\n **gen_kwargs,\n )\n # in case the batch is shorter than max length, the output should be padded\n if generated_tokens.shape[-1] < gen_kwargs[\"max_length\"]:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens,\n gen_kwargs[\n \"max_length\"])\n\n with torch.no_grad():\n with self.compute_loss_context_manager():\n outputs = model(**inputs)\n if has_labels:\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs,\n inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else\n outputs[0]).mean().detach()\n else:\n loss = None\n\n if self.args.prediction_loss_only:\n return (loss, None, None)\n\n if has_labels:\n labels = inputs[\"labels\"]\n if labels.shape[-1] < gen_kwargs[\"max_length\"]:\n labels = 
self._pad_tensors_to_max_len(labels,\n gen_kwargs[\"max_length\"])\n else:\n labels = None\n\n return (loss, generated_tokens, labels)" }, { "identifier": "ConstrainedDataCollator", "path": "data.py", "snippet": "class ConstrainedDataCollator:\n \"\"\"\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n The tokenizer used for encoding the data.\n model ([`PreTrainedModel`]):\n The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to\n prepare the *decoder_input_ids*\n\n This is useful when using *label_smoothing* to avoid calculating loss twice.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (`int`, *optional*, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n return_tensors (`str`):\n The type of Tensor to return. 
Allowable values are \"np\", \"pt\" and \"tf\".\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n model: Optional[Any] = None\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n return_tensors: str = \"pt\"\n\n def __call__(self, features, return_tensors=None):\n import numpy as np\n\n if return_tensors is None:\n return_tensors = self.return_tensors\n labels = [feature[\"labels\"] for\n feature in features] if \"labels\" in features[\n 0].keys() else None\n decoder_labels = [feature[\"decoder_labels\"] for\n feature in features] if \"decoder_labels\" in features[\n 0].keys() else None\n # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the\n # same length to return tensors.\n if labels is not None:\n assert decoder_labels is not None\n max_label_length = max(len(l) for l in labels)\n if self.pad_to_multiple_of is not None:\n max_label_length = (\n (max_label_length + self.pad_to_multiple_of - 1)\n // self.pad_to_multiple_of\n * self.pad_to_multiple_of\n )\n\n padding_side = self.tokenizer.padding_side\n for feature in features:\n remainder = [self.label_pad_token_id] * (\n max_label_length - len(feature[\"labels\"]))\n if isinstance(feature[\"labels\"], list):\n feature[\"labels\"] = (\n feature[\n \"labels\"] + remainder if padding_side == \"right\"\n else remainder + feature[\"labels\"]\n )\n feature[\"decoder_labels\"] = (\n feature[\n \"decoder_labels\"] + remainder if padding_side ==\n \"right\"\n else remainder + feature[\"decoder_labels\"]\n )\n elif padding_side == \"right\":\n feature[\"labels\"] = np.concatenate(\n [feature[\"labels\"], remainder]).astype(np.int64)\n feature[\"decoder_labels\"] = np.concatenate(\n [feature[\"decoder_labels\"], remainder]).astype(np.int64)\n else:\n feature[\"labels\"] = np.concatenate(\n [remainder, feature[\"labels\"]]).astype(np.int64)\n feature[\"decoder_labels\"] = np.concatenate(\n [remainder, feature[\"decoder_labels\"]]).astype(np.int64)\n\n features = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=return_tensors,\n )\n\n # prepare decoder_input_ids\n if (\n labels is not None\n and self.model is not None\n and hasattr(self.model, \"prepare_decoder_input_ids_from_labels\")\n ):\n decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(\n labels=features[\"decoder_labels\"])\n features[\"decoder_input_ids\"] = decoder_input_ids\n if self.model.is_input_feed:\n decoder_input_actions = \\\n self.model.prepare_decoder_input_ids_from_labels(\n labels=features[\"labels\"])\n features[\"decoder_input_actions\"] = decoder_input_actions\n del features[\"decoder_labels\"]\n return features" }, { "identifier": "ConstrainedT5", "path": "model.py", "snippet": "class ConstrainedT5(T5ForConditionalGeneration):\n\n def __init__(self, config: T5Config, special_ids: Dict,\n seq2seq_type: str, action_type: str,\n add_mention_end: bool):\n super().__init__(config)\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids.get('mention_end',None)\n self.eos_id = special_ids['eos']\n self.action_type = action_type\n self.add_mention_end = add_mention_end\n self.cluster_ids = None\n self.copy_id = special_ids['copy']\n self.seq2seq_type = seq2seq_type\n if action_type == 'integer':\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + 
[\n special_ids['mention_end']]\n self.specials = [self.mention_start, self.sep,\n self.copy_id] + self.ent_ids\n # self.seq2seq_type = seq2seq_type\n else:\n self.cluster_new = special_ids['cluster_new']\n self.cluster_ids = special_ids['cluster_ids']\n self.eos_id = special_ids['eos']\n if self.add_mention_end:\n self.specials = [self.mention_start,\n self.mention_end,\n self.cluster_new,\n self.copy_id] + self.cluster_ids\n else:\n self.specials = [self.mention_start,\n self.cluster_new,\n self.copy_id] + self.cluster_ids\n if self.seq2seq_type == 'tagging':\n self.specials.append(self.eos_id)\n self.is_input_feed = (self.seq2seq_type == \"input_feed\")\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput,\n config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n decoder_input_actions: Optional[torch.LongTensor] = None,\n full_decoder_input_ids: Optional[torch.LongTensor] = None\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,\n config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for\n labels in `[0, ..., config.vocab_size]`\n\n Returns:\n\n \"\"\"\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\n if head_mask is not None and decoder_head_mask is None:\n if self.config.num_layers == self.config.num_decoder_layers:\n warnings.warn(HEAD_MASK_WARNING_MSG, FutureWarning)\n decoder_head_mask = head_mask\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(\n encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(\n encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n # Set device for model parallelism\n if self.is_input_feed and not self.training and decoder_input_actions is None:\n decoder_input_actions = self.input_to_actions(\n full_decoder_input_ids)\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n hidden_states = hidden_states.to(self.decoder.first_device)\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.to(\n self.decoder.first_device)\n if attention_mask is not None:\n attention_mask = attention_mask.to(self.decoder.first_device)\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.to(\n self.decoder.first_device)\n if self.is_input_feed and decoder_input_actions is \\\n not None:\n decoder_input_actions = decoder_input_actions.to(\n self.decoder.first_device\n )\n if self.is_input_feed:\n decoder_token_embeds = self.decoder.embed_tokens(decoder_input_ids)\n if not self.training and past_key_values is not None:\n decoder_action_embeds = self.decoder.embed_tokens(\n decoder_input_actions[:, -1:])\n else:\n decoder_action_embeds = self.decoder.embed_tokens(\n decoder_input_actions)\n decoder_inputs_embeds = decoder_token_embeds / 2 + decoder_action_embeds / 2\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids if not self.is_input_feed else None,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.encoder.first_device)\n self.lm_head = 
self.lm_head.to(self.encoder.first_device)\n sequence_output = sequence_output.to(self.lm_head.weight.device)\n\n if self.config.tie_word_embeddings:\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n\n lm_logits = self.lm_head(sequence_output)\n masks = torch.ones_like(lm_logits,\n dtype=torch.bool)\n masks[:, :, self.specials] = False\n lm_logits.masked_fill_(masks, -float('inf'))\n\n loss = None\n if labels is not None:\n # construct constrained mask here\n\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(\n -1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs\n ):\n\n # cut decoder_input_ids if past is used\n if past is not None:\n cut_input_ids = input_ids[:, -1:]\n else:\n cut_input_ids = input_ids\n\n return {\n \"decoder_input_ids\": cut_input_ids,\n \"past_key_values\": past,\n \"encoder_outputs\": encoder_outputs,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache,\n \"full_decoder_input_ids\": input_ids\n }\n\n def input_to_actions(self, input_ids: torch.LongTensor):\n # input_ids : B x L\n input_actions = deepcopy(input_ids)\n if self.action_type == 'integer':\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n is_copy = ((~is_start) & (~is_ent) & (~is_end))\n else:\n cluster_ids = self.cluster_ids.to(input_ids.device)\n is_not_cid = torch.isin(input_ids, cluster_ids, invert=True)\n is_not_start = (input_ids != self.mention_start)\n if self.add_mention_end:\n is_not_end = (input_ids != self.mention_end)\n is_copy = (is_not_start & is_not_end & is_not_cid)\n else:\n is_copy = (is_not_start & is_not_cid)\n input_actions[:, 1:][is_copy[:, 1:]] = self.copy_id\n return input_actions" } ]
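The constrained decoding in ConstrainedT5.forward hinges on a single step: every vocabulary position outside self.specials is pushed to -inf before the softmax/loss, so the decoder can only emit the allowed action tokens. A minimal standalone sketch of that masking idea follows; the vocabulary size and the allowed token ids are made up for illustration (the real ids come from SPECIAL_IDS):

import torch

# Hypothetical 10-token vocabulary; only ids 2, 5 and 7 are allowed actions.
allowed_ids = [2, 5, 7]                      # stands in for self.specials
lm_logits = torch.randn(1, 4, 10)            # (batch, seq_len, vocab_size)

# Build a mask that is True everywhere except the allowed ids; disallowed
# logits become -inf, so softmax assigns them exactly zero probability mass.
mask = torch.ones_like(lm_logits, dtype=torch.bool)
mask[:, :, allowed_ids] = False
lm_logits = lm_logits.masked_fill(mask, float("-inf"))

probs = lm_logits.softmax(dim=-1)
print(probs[0, 0])  # non-zero only at positions 2, 5 and 7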
import logging import os import sys from transformers import HfArgumentParser, set_seed from transformers import AutoModelForSeq2SeqLM, \ DataCollatorForSeq2Seq, AutoConfig, AutoTokenizer from transformers.integrations import TensorBoardCallback from arguments import DataArguments, ModelArguments, CorefTrainingArguments \ as TrainingArguments from data import CorefDataset, JointDataset from constants import SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, \ COPY, CLUSTER_NEW, CLUSTERS, SENTENCE_START, SENTENCE_END, SPECIAL_IDS, \ NON_INT_SPECIAL_IDS, MARK_SPECIAL_IDS, MENTION_END_NON_INT_SPECIAL_IDS, \ MENTION_ENDS from trainer import CorefTrainer from data import ConstrainedDataCollator from model import ConstrainedT5
20,269
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END,
COPY])
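For context, the code above plus its completion line builds up a tokenizer.add_tokens([...]) call for the coreference action vocabulary. A minimal sketch of the usual follow-up, assuming a placeholder checkpoint and placeholder token strings: after adding tokens, the embedding matrix must be resized before training.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")       # placeholder checkpoint
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# add_tokens returns how many strings were actually new to the vocabulary.
num_new_tokens = tokenizer.add_tokens(["<speaker>", "</speaker>", "<m>", "</m>", "<copy>"])

# Grow the embedding matrix so the new ids have rows to train.
if num_new_tokens > 0:
    model.resize_token_embeddings(len(tokenizer))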
9
2023-10-17 17:39:16+00:00
24k
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It includes methods for preprocessing, resizing, and normalizing audio data.\n Subclasses may override these methods to implement dataset-specific processing and resizing logic.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initialize an AudioDataset instance.\n\n Args:\n data_config (DictConfig): Configuration for loading the dataset, including paths, audio properties, etc.\n split (str): Specifies which split of the dataset to load (e.g., 'train', 'validation', 'test').\n evaluation (bool, optional): Indicates whether the dataset is for evaluation purposes. Defaults to False.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def normalize_audio(audio_np: np.ndarray, sample_rate: int) -> np.ndarray:\n \"\"\"Normalize the amplitude of the audio data to a standard range.\n\n This method utilizes PyDub's effects module to perform audio normalization.\n\n Args:\n audio_np (np.ndarray): Audio data represented as a NumPy array.\n sample_rate (int): The sample rate of the audio data.\n\n Returns:\n np.ndarray: The normalized audio data as a NumPy array.\n \"\"\"\n # Convert numpy array to AudioSegment\n audio_segment = AudioSegment(audio_np.tobytes(), frame_rate=int(sample_rate), sample_width=2, channels=1)\n\n # Normalize with PyDub\n normalized_audio_segment = effects.normalize(audio_segment)\n\n # Convert back to numpy\n normalized_audio_np = np.array(normalized_audio_segment.get_array_of_samples())\n\n return normalized_audio_np\n\n def _resize_op(self, audio: tf.Tensor, size: int) -> tf.Tensor:\n \"\"\"Resize the input audio to a specified size and normalize its amplitude to the range [0, 1].\n\n If the audio length is less than the specified size, zero padding is applied to reach the desired size.\n If the audio length is greater, it is truncated to the specified size.\n\n Args:\n audio (tf.Tensor): Input audio data as a TensorFlow tensor.\n size (int): The target size for the audio data.\n\n Returns:\n tf.Tensor: The resized and normalized audio data as a TensorFlow tensor.\n \"\"\"\n # Normalize dataset\n pylogger.info(\"Normalizing audio...\")\n audio = tf.cast(audio, dtype=tf.int16)\n # Calculate current length of the audio\n pylogger.info(\"Resizing audio to size {}...\".format(size))\n audio_length = tf.shape(audio)[0]\n audio = tf.cond(\n audio_length < size,\n lambda: tf.concat([audio, tf.zeros(size - audio_length, dtype=audio.dtype)], axis=0),\n lambda: audio[:size],\n )\n audio_np = tf.numpy_function(self.normalize_audio, [audio, self.data_config.audio_sample_rate], tf.int16)\n audio = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n audio = tf.cast(audio, dtype=tf.float32)\n pylogger.info(\"Converting audio to range [-1, 1]...\")\n max_intensity = self.data_config.audio_max_intensity\n audio = audio / max_intensity\n return audio\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocess the input audio data.\n\n This method resizes the audio data to a specified size based on the dataset configuration and normalizes the amplitude to the range [-1, +1].\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input audio data and any associated metadata.\n\n 
Returns:\n Dict[str, Any]: A dictionary containing the preprocessed audio data and any associated metadata.\n \"\"\"\n pylogger.info(\"Preprocessing audios for split {}...\".format(self.split))\n audio = self._resize_op(\n audio=d[\"audio\"], size=int(self.data_config.audio_sample_rate * self.data_config.audio_max_duration)\n )\n audio = tf.reshape(\n tensor=audio,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Audio reshaped to shape {}...\".format(audio.shape))\n return dict(data=audio, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Postprocess the output audio data.\n\n This method applies the inverse of the preprocessing steps to revert the audio data to its original form.\n\n Args:\n batch_data (Any): A batch of audio data to postprocess.\n inverse_scaler (Callable): A function that applies the inverse of the preprocessing steps.\n\n Returns:\n Any: A batch of postprocessed audio data.\n \"\"\"\n max_intensity = self.data_config.audio_max_intensity\n batch_audio = inverse_scaler(batch_data)\n batch_audio = batch_audio * max_intensity\n batch_post_processed = tf.cast(batch_audio, tf.int16)\n audio_np = tf.numpy_function(\n self.normalize_audio, [batch_post_processed, self.data_config.audio_sample_rate], tf.int16\n )\n batch_post_processed = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n return batch_post_processed" }, { "identifier": "ImageDataset", "path": "src/functional_diffusion_processes/datasets/image_dataset.py", "snippet": "class ImageDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for handling image datasets.\n\n Provides a structured way to load, preprocess, and post-process image data.\n This class can be extended to handle specific image datasets as required.\n\n Attributes:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initializes the ImageDataset object with dataset configurations.\n\n Args:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def _resize_op(image: Any, size: int) -> Any:\n \"\"\"Resizes the input image to the specified size and normalizes its values to the range [0,1].\n\n Args:\n image (Any): A tensor representing the input image.\n size (int): The target size for each dimension of the output image.\n\n Returns:\n Any: A tensor representing the resized and normalized image.\n \"\"\"\n # convert to range [0,1]\n pylogger.info(\"Converting image to range [0,1]...\")\n image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)\n\n # resize to size\n pylogger.info(\"Resizing image to size {}...\".format(size))\n\n image = tf.image.resize(images=image, size=[size, size])\n\n return image\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses the input data by resizing, possibly flipping, and applying uniform dequantization.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data with keys 'image' and optionally 'label'.\n\n Returns:\n Dict[str, Any]: A dictionary 
containing the preprocessed data, with keys 'data' and optionally 'label'.\n \"\"\"\n image = self._resize_op(image=d[\"image\"], size=self.data_config.image_width_size)\n\n pylogger.info(\"Preprocessing images for split {}...\".format(self.split))\n\n if self.data_config.random_flip and not self.evaluation:\n pylogger.info(\"Applying random flips...\")\n image = tf.image.random_flip_left_right(image=image, seed=self.data_config.seed)\n\n if self.data_config.uniform_dequantization:\n pylogger.info(\"Applying uniform dequantization...\")\n image = (\n tf.random.uniform(shape=image.shape, dtype=tf.float32, seed=self.data_config.seed) + image * 255.0\n ) / 256.0\n\n image = tf.reshape(\n tensor=image,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Image reshaped to shape {}...\".format(image.shape))\n\n return dict(data=image, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Post-processes the output data by reverting the preprocessing steps.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to invert the scaling applied to the data.\n\n Returns:\n Any: A batch of postprocessed data, arranged in a grid for visualization.\n \"\"\"\n batch_post_processed = make_grid_image(\n ndarray=process_images(images=batch_data),\n inverse_scaler=inverse_scaler,\n )\n return batch_post_processed" }, { "identifier": "BaseDataset", "path": "src/functional_diffusion_processes/datasets/base_dataset.py", "snippet": "class BaseDataset(abc.ABC):\n \"\"\"Abstract base class for defining datasets.\n\n Provides a template for loading, preprocessing, and iterating over datasets.\n It encapsulates common dataset configurations and operations while allowing for dataset-specific\n preprocessing and post-processing through abstract methods.\n\n Attributes:\n dataset_builder: A builder object for loading the dataset.\n data_config (DictConfig): Configuration parameters for the dataset.\n split (str): Specifies which split of the dataset to load, e.g., 'train', 'validation', or 'test'.\n evaluation (bool): Indicates whether the dataset is for evaluation purposes.\n dataset_options: Options for configuring the dataset pipeline.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Abstract base class for defining datasets.\n\n This class provides a skeleton for defining datasets, with abstract methods for\n preprocessing data, generating batches of data, and resizing images. 
Subclasses\n must implement these methods to define their specific datasets.\n\n Args:\n data_config (DictConfig): A dictionary-like object containing the configuration for\n loading the dataset.\n\n split (str): A string specifying which split of the dataset to load.\n\n evaluation (bool): A boolean specifying whether the dataset is for evaluation purposes.\n \"\"\"\n self.dataset_builder = None\n self.data_config = data_config\n self.split = split\n self.evaluation = evaluation\n self.dataset_options = tf.data.Options()\n self.dataset_options.experimental_optimization.map_parallelization = True\n self.dataset_options.experimental_threading.private_threadpool_size = 48\n self.dataset_options.experimental_threading.max_intra_op_parallelism = 1\n\n @abc.abstractmethod\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Abstract method for preprocessing input data.\n\n Subclasses should override this method to implement dataset-specific preprocessing.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data.\n\n Returns:\n Dict[str, Any]: A dictionary containing the preprocessed data.\n \"\"\"\n raise NotImplementedError(\"Subclasses must implement preprocess_fn method.\")\n\n @abc.abstractmethod\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Abstract method for postprocessing output data.\n\n Subclasses should override this method to implement dataset-specific post-processing.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to inverse the scaling of the data.\n\n Returns:\n Any: A dictionary containing the postprocessed data.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the postprocess_fn method.\")\n\n def _generator(self) -> Iterator[Any]:\n \"\"\"Generate batches of preprocessed data.\n\n Loads the dataset, shuffles the data, applies preprocessing, and batches the data.\n Subclasses might override this method to implement dataset-specific batching logic.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n # load the dataset\n if isinstance(self.dataset_builder, tfds.core.DatasetBuilder):\n read_config = tfds.ReadConfig(options=self.dataset_options)\n if self.data_config.download:\n self.dataset_builder.download_and_prepare()\n ds = self.dataset_builder.as_dataset(\n split=self.split,\n shuffle_files=False,\n read_config=read_config,\n as_supervised=False,\n )\n else:\n ds = self.dataset_builder.with_options(options=self.dataset_options)\n\n ds = ds.shuffle(buffer_size=10000, seed=self.data_config.seed)\n\n # apply the preprocessing function to each element in the dataset\n ds = ds.map(map_func=self.preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # determine the batch size per device\n ds = ds.batch(batch_size=self.data_config.batch_size, drop_remainder=True)\n ds = ds.batch(batch_size=jax.device_count(), drop_remainder=True)\n\n ds = ds.repeat(count=100000 if not self.evaluation else 1)\n\n return iter(ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE))\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Return an iterator that generates batches of preprocessed data.\n\n Calls the `_generator` method to obtain an iterator for generating preprocessed data batches.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n return self._generator()\n\n def __len__(self) -> int:\n \"\"\"Return the number of examples in the 
dataset.\n\n Obtains the total number of examples in the specified dataset split from the dataset builder's info attribute.\n\n Returns:\n int: The number of examples in the dataset.\n \"\"\"\n return self.dataset_builder.info.splits[self.split].num_examples" }, { "identifier": "Loss", "path": "src/functional_diffusion_processes/losses/base_loss.py", "snippet": "class Loss(abc.ABC):\n \"\"\"Abstract class representing a loss function.\n\n Provides a framework for defining custom loss functions by enforcing the implementation\n of `construct_loss_fn` method in any derived classes. This class holds a reference to\n a stochastic differential equation (SDE) object which is used to calculate the weight factor for the loss.\n\n Attributes:\n sde (SDE): The stochastic differential equation instance associated with this loss.\n \"\"\"\n\n def __init__(self, sde: SDE) -> None:\n \"\"\"Initializes the Loss instance with a given SDE.\n\n Args:\n sde (SDE): An SDE instance which might be used in the loss computation.\n \"\"\"\n self.sde = sde\n\n def construct_loss_fn(self, model: Any) -> Callable:\n \"\"\"Abstract method to construct a loss function for a given model.\n\n This method should be implemented by any derived class to define the loss\n computation specific to the type of loss being implemented.\n\n Args:\n model (Any): The model for which to construct the loss function.\n\n Returns:\n Callable: A callable representing the constructed loss function.\n\n Raises:\n NotImplementedError: If the method is not implemented by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the construct_loss_fn method.\")" }, { "identifier": "FIDMetric", "path": "src/functional_diffusion_processes/metrics/fid_metric.py", "snippet": "class FIDMetric:\n \"\"\"Class for computing the Frechet Inception Distance (FID) metric.\n\n This class facilitates the computation of the FID metric, which measures the similarity between two distributions of images.\n It precomputes features for the real dataset using a specified Inception feature extractor and provides methods to compute\n and store features for generated images, and to compute the FID and Inception Score (IS).\n\n Attributes:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n generated_pools (list): List to store features of generated images.\n generated_logits (list): List to store logits of generated images.\n real_features (dict): Dictionary to store precomputed features of real dataset.\n \"\"\"\n\n def __init__(\n self,\n metric_config: DictConfig,\n feature_extractor: InceptionFeatureExtractor,\n dataset: BaseDataset,\n ) -> None:\n \"\"\"Initializes the FIDMetric class with specified configurations, feature extractor, and dataset.\n\n Args:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n \"\"\"\n self.metric_config = metric_config\n self.feature_extractor = feature_extractor\n self.dataset = dataset\n self.generated_pools = []\n self.generated_logits = []\n try:\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n 
dataset_name=metric_config.dataset_name,\n )\n except FileNotFoundError:\n self._precompute_features(\n dataset_name=metric_config.dataset_name,\n save_path=metric_config.real_features_path,\n )\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n dataset_name=metric_config.dataset_name,\n )\n\n def _precompute_features(self, dataset_name: str, save_path: str) -> None:\n \"\"\"Precomputes and saves features for the real dataset.\n\n Args:\n dataset_name (str): Name of the dataset.\n save_path (str): Path where the computed features will be saved.\n \"\"\"\n tf.io.gfile.makedirs(path=save_path)\n\n tf.io.gfile.makedirs(os.path.join(save_path, f\"{dataset_name.lower()}_clean\"))\n\n # Use the feature extractor to compute features for the real dataset\n all_pools = self.feature_extractor.extract_features(\n dataset=self.dataset, save_path=save_path, dataset_name=dataset_name\n )\n\n # Save latent represents of the Inception network to disk or Google Cloud Storage\n filename = f\"{dataset_name.lower()}_stats.npz\"\n\n if jax.host_id() == 0:\n pylogger.info(\"Saving real dataset stats to: %s\" % os.path.join(save_path, filename))\n\n with tf.io.gfile.GFile(os.path.join(save_path, filename), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=all_pools)\n f_out.write(io_buffer.getvalue())\n\n def compute_fid(self, eval_dir, num_sampling_round) -> Tuple[float, float]:\n \"\"\"Computes the FID and Inception Score (IS) for the generated and real images.\n\n Args:\n eval_dir (str): Directory path for evaluation.\n num_sampling_round (int): Number of sampling rounds.\n\n Returns:\n Tuple[float, float]: A tuple containing the FID and Inception Score.\n \"\"\"\n real_pools = self.real_features[\"pool_3\"]\n if not self.feature_extractor.inception_v3 and not self.feature_extractor.inception_v3 == \"lenet\":\n if len(self.generated_logits) == 0 or len(self.generated_pools) == 0:\n if jax.host_id() == 0:\n # Load all statistics that have been previously computed and saved for each host\n for host in range(jax.host_count()):\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n wait_message = False\n while len(stats) < num_sampling_round:\n if not wait_message:\n print(\"Waiting for statistics on host %d\" % (host,))\n wait_message = True\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n time.sleep(10)\n\n for stat_file in stats:\n with tf.io.gfile.GFile(stat_file, \"rb\") as fin:\n stat = np.load(fin)\n\n self.generated_pools.append(stat[\"pool_3\"])\n self.generated_logits.append(stat[\"logits\"])\n\n all_logits = np.concatenate(self.generated_logits, axis=0)[: self.metric_config.num_samples]\n inception_score = tfgan.eval.classifier_score_from_logits(logits=all_logits)\n else:\n inception_score = -1\n\n all_pools = np.concatenate(self.generated_pools, axis=0)[: self.metric_config.num_samples]\n\n fid = tfgan.eval.frechet_classifier_distance_from_activations(activations1=real_pools, activations2=all_pools)\n\n return fid, inception_score\n\n def compute_and_store_generated_features(self, images: Any, sample_dir: str, round_num: int) -> None:\n \"\"\"Computes features for the generated images and stores them in a specified directory.\n\n Args:\n images (Any): Tensor representing the generated images.\n sample_dir (str): Directory where the features will be stored.\n round_num (int): Round number in the training process.\n \"\"\"\n latents = 
self.feature_extractor.extract_features(images)\n\n self.generated_pools.append(latents[\"pool_3\"])\n\n gc.collect()\n\n if self.feature_extractor.model_name == \"inception\" or self.feature_extractor.inception_v3:\n self.generated_logits.append(latents[\"logits\"])\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(\n io_buffer,\n pool_3=latents[\"pool_3\"],\n logits=latents[\"logits\"],\n )\n\n f_out.write(io_buffer.getvalue())\n\n elif self.feature_extractor.model_name == \"lenet\":\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=latents[\"pool_3\"])\n f_out.write(io_buffer.getvalue())" }, { "identifier": "Sampler", "path": "src/functional_diffusion_processes/samplers/base_sampler.py", "snippet": "class Sampler(abc.ABC):\n \"\"\"Abstract base class for creating sampler objects.\n\n This class serves as a template for creating sampler objects which are\n designed to generate samples of a stochastic process governed by a\n specified stochastic differential equation (SDE). The process of sampling\n is carried out by employing specified predictor and corrector methods.\n\n Attributes:\n predictor (Predictor): The predictor method to be used in the sampling process.\n corrector (Corrector): The corrector method to be used in the sampling process.\n sde (SDE): The stochastic differential equation governing the process to be sampled.\n sampler_config (DictConfig): Configuration settings for the sampler.\n\n Methods:\n make_sampler(predict_fn: Callable) -> Callable:\n Abstract method to create a sampling function based on the specified predictor,\n corrector, and SDE.\n \"\"\"\n\n def __init__(self, predictor: Predictor, corrector: Corrector, sde: SDE, sampler_config: DictConfig) -> None:\n \"\"\"Initializes the Sampler object with specified predictor, corrector, SDE, and configuration.\n\n Args:\n predictor (Predictor): The predictor method for the sampler.\n corrector (Corrector): The corrector method for the sampler.\n sde (SDE): The stochastic differential equation governing the process.\n sampler_config (DictConfig): Configuration settings for the sampler.\n \"\"\"\n super().__init__()\n self.predictor = predictor\n self.corrector = corrector\n self.sampler_config = sampler_config\n self.sde = sde\n\n def make_sampler(self, predict_fn: Callable, auxiliary_fn: Union[Any, Callable]) -> Callable:\n \"\"\"Abstract method to create a sampler function.\n\n This method is intended to be overridden by derived classes to provide\n specific implementations for creating a sampler function. 
The sampler\n function will utilize the specified predictor and corrector methods\n along with the provided SDE to generate samples of the stochastic process.\n\n Args:\n predict_fn (Callable): The model prediction function.\n auxiliary_fn (Callable): The auxiliary prediction function for the model.\n\n Returns:\n Callable: The constructed sampling function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the make_sampler method.\")" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "filter_mask", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def filter_mask(shape, radius):\n device_num, batch_size, rows, cols, n_channels = shape\n crow, ccol = int(rows / 2), int(cols / 2)\n center = [crow, ccol]\n x, y = jnp.ogrid[:rows, :cols]\n mask_area = (x - center[0]) ** 2 + (y - center[1]) ** 2 >= radius * radius\n mask = jnp.ones_like(mask_area)\n mask = jnp.where(mask_area, 0, mask)\n mask = mask.reshape(1, 1, rows, cols, 1)\n mask = jnp.repeat(mask, device_num, axis=0)\n mask = jnp.repeat(mask, batch_size, axis=1)\n mask = jnp.repeat(mask, n_channels, axis=4)\n return mask" }, { "identifier": "make_grid_image", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def make_grid_image(ndarray: Any, inverse_scaler: Callable, padding: int = 2, pad_value: float = 0.0) -> Any:\n \"\"\"Make a grid image from a Numpy Array.\n\n Args:\n ndarray: The Numpy Array.\n inverse_scaler: The inverse scaler.\n padding: The padding.\n pad_value: The padding value.\n\n Returns:\n The grid image.\n \"\"\"\n ndarray = jnp.asarray(ndarray)\n\n if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images\n ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)\n\n n_row = int(np.sqrt(ndarray.shape[0]))\n # make the mini-batch of images into a grid\n n_maps = ndarray.shape[0]\n x_maps = min(n_row, n_maps)\n ymaps = int(math.ceil(float(n_maps) / x_maps))\n height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)\n num_channels = ndarray.shape[3]\n grid = np.full((height * ymaps + padding, width * x_maps + padding, num_channels), pad_value).astype(np.float32)\n k = 0\n for y in range(ymaps):\n for x in range(x_maps):\n if k >= n_maps:\n break\n grid[\n y * height + padding : (y + 1) * height,\n x * width + padding : (x + 1) * width,\n ] = ndarray[k]\n k = k + 1\n\n ndarr = inverse_scaler(grid)\n ndarr = jnp.clip(ndarr * 255, 0, 255).astype(jnp.uint8)\n return ndarr" }, { "identifier": "process_images", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def process_images(images: Any) -> Any:\n \"\"\"Reshape images to the correct shape.\n\n Args:\n images: Tensor of images to reshape.\n\n Returns:\n A tensor of images with the correct shape.\n \"\"\"\n w = np.sqrt(images.shape[2]).astype(int)\n h = np.sqrt(images.shape[2]).astype(int)\n o = images.shape[3]\n return images.reshape(-1, w, h, o)" }, { "identifier": "save_samples", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def save_samples(round_num: int, samples: Any, file_path: str) -> None:\n \"\"\"Save samples to a file.\n\n Args:\n round_num: The round number of the evaluation.\n samples: Tensor of samples to save.\n file_path: string of the Path to the file where the samples will be saved.\n \"\"\"\n for i in range(samples.shape[0]):\n clean_path = os.path.join(file_path, f\"clean/samples_{round_num}_{i}.npy\")\n np.save(clean_path, samples[i])\n samples_path = os.path.join(file_path, 
f\"samples_{round_num}.npz\")\n with tf.io.gfile.GFile(samples_path, \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, samples=samples)\n f_out.write(io_buffer.getvalue())" }, { "identifier": "to_grayscale", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "@jax.pmap\ndef to_grayscale(images):\n weights = np.array([0.2989, 0.5870, 0.1140])[None, None, None, :] # Extend dimensions\n grayscale_images = np.sum(images * weights, axis=-1)\n return grayscale_images" }, { "identifier": "get_data_inverse_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_inverse_scaler(is_centered: bool) -> Callable:\n \"\"\"Inverse data normalizer.\n\n Rescale data to original range at the end of the diffusion.\n\n Args:\n is_centered: boolean if True data will rescaled from [-1, 1] to [0, 1].\n \"\"\"\n if is_centered:\n # Rescale [-1, 1] to [0, 1]\n return lambda x: (x + 1.0) / 2.0\n else:\n return lambda x: x" }, { "identifier": "get_data_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_scaler(is_centered: bool) -> Callable:\n \"\"\"Normalize data. Assume data are always in [0, 1].\n\n Args:\n is_centered: boolean if True data will be centered in [-1, 1].\n \"\"\"\n if is_centered:\n # Rescale to [-1, 1]\n return lambda x: x * 2.0 - 1.0\n else:\n return lambda x: x" }, { "identifier": "TrainState", "path": "src/functional_diffusion_processes/utils/training_state.py", "snippet": "class TrainState(train_state.TrainState):\n \"\"\"The training state for the model.\"\"\"\n\n opt_state_params: Any\n ema_params: Any\n rng: jax.random.PRNGKey" }, { "identifier": "colorizing_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def colorizing_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, gray_scale_img: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform colorizing task on a given grayscale image.\n\n Args:\n sample_fn (Callable): The sampling function used for colorization.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for colorization.\n gray_scale_img (jnp.ndarray): The grayscale image to be colorized.\n\n Returns:\n Tuple: The updated state and the colorized image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, gray_scale_img)" }, { "identifier": "construct_sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_sampling_fn(model: flax.linen.Module, sampler: Sampler) -> Callable:\n \"\"\"Construct a sampling function for generating samples from the model.\n\n Args:\n model (flax.linen.Module): The model instance from which to generate samples.\n sampler (Sampler): The sampler instance used for sampling.\n\n Returns:\n Callable: The constructed sampling function.\n \"\"\"\n predict_fn = model.make_predict_fn()\n if isinstance(model, BaseMAML):\n super_resolution_fn = model.make_super_resolution_fn()\n sample_fn = sampler.make_sampler(predict_fn, super_resolution_fn)\n else:\n sample_fn = sampler.make_sampler(predict_fn, None)\n return sample_fn" }, { "identifier": "construct_train_step", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_train_step(optimizer, loss_fn) -> Callable:\n \"\"\"Construct a train step function to be used in the training loop.\n\n This function creates a training step function which, when called, performs\n a single 
step of training including forward pass, loss computation, and\n backward pass for gradient computation and updates.\n\n Args:\n optimizer: The optimizer instance used for updating model parameters.\n loss_fn: The loss function used for computing the loss.\n\n Returns:\n Callable: The constructed train step function.\n \"\"\"\n\n @partial(jax.pmap, axis_name=\"device\")\n def train_fn(\n rng,\n params,\n optim_params,\n step,\n batch_input,\n batch,\n ):\n grad_params, (new_rng, loss, loss_inner, batch_reconstructed, batch_corrupted, target) = loss_fn(\n rng, params, step, batch_input, batch\n )\n\n loss = jax.lax.pmean(loss, axis_name=\"device\")\n grad_params = jax.lax.pmean(grad_params, axis_name=\"device\")\n\n updates, optim_params = optimizer.update(grad_params, optim_params, params)\n\n params = optax.apply_updates(params, updates)\n params = clip_learning_rates(params)\n return new_rng, loss, loss_inner, params, optim_params, batch_reconstructed, batch_corrupted, target\n\n return train_fn" }, { "identifier": "inpainting_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def inpainting_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, image: jnp.ndarray, mask: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform inpainting task on a given image using a mask.\n\n Args:\n sample_fn (Callable): The sampling function used for inpainting.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for inpainting.\n image (jnp.ndarray): The image to be inpainted.\n mask (jnp.ndarray): The mask used for inpainting.\n\n Returns:\n Tuple: The updated state and the inpainted image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, image, mask)" }, { "identifier": "sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def sampling_fn(sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray) -> Tuple:\n \"\"\"Perform sampling task using a given sampling function.\n\n Args:\n sample_fn (Callable): The sampling function.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for sampling.\n\n Returns:\n Tuple: The updated state after performing the sampling.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params)" } ]
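The `construct_train_step` helper above follows the standard `jax.pmap` data-parallel pattern: per-device losses and gradients are averaged with `jax.lax.pmean` before the optax update, so every replica applies the same update and stays synchronized. A minimal runnable sketch of that pattern, not the repository's code (the quadratic loss and one-parameter model are hypothetical stand-ins):

from functools import partial

import jax
import jax.numpy as jnp
import optax

optimizer = optax.adam(1e-3)

def loss_fn(params, batch):
    # Hypothetical quadratic loss standing in for the model's loss function.
    pred = batch["x"] * params["w"]
    return jnp.mean((pred - batch["y"]) ** 2)

@partial(jax.pmap, axis_name="device")
def train_fn(params, opt_state, batch):
    loss, grads = jax.value_and_grad(loss_fn)(params, batch)
    # Average loss and gradients across devices, as construct_train_step does.
    loss = jax.lax.pmean(loss, axis_name="device")
    grads = jax.lax.pmean(grads, axis_name="device")
    updates, opt_state = optimizer.update(grads, opt_state, params)
    params = optax.apply_updates(params, updates)
    return params, opt_state, loss

n_dev = jax.local_device_count()
init_params = {"w": jnp.ones(())}
params = jax.device_put_replicated(init_params, jax.local_devices())
opt_state = jax.device_put_replicated(optimizer.init(init_params), jax.local_devices())
batch = {"x": jnp.ones((n_dev, 8)), "y": jnp.zeros((n_dev, 8))}  # leading axis = devices
params, opt_state, loss = train_fn(params, opt_state, batch)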
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
14,800
# update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." % (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = 
batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb:
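The `deblurring` branch above corrupts images with a low-pass filter in frequency space: `filter_mask` keeps coefficients within a fixed radius of the (shifted) spectrum center, and the inverse FFT of the masked spectrum is the blurred image. A single-image sketch of the same operation (the real code works on (devices, batch, H, W, channels) arrays; the 28x28 test image below is made up):

import jax.numpy as jnp

def low_pass(image, radius=10):
    rows, cols = image.shape
    x, y = jnp.ogrid[:rows, :cols]
    dist2 = (x - rows // 2) ** 2 + (y - cols // 2) ** 2
    mask = (dist2 < radius * radius).astype(image.dtype)  # 1 inside the circle, 0 outside
    freq = jnp.fft.fftshift(jnp.fft.fft2(image))  # move zero frequency to the center
    blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(freq * mask)))
    return blurred, mask

image = jnp.zeros((28, 28)).at[10:18, 10:18].set(1.0)  # toy image
blurred, mask = low_pass(image, radius=10)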
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6)) state = TrainState.create( apply_fn=model.apply, params=params, tx=self.optimizer, opt_state_params=self.optimizer.init(params), rng=rng, ema_params=params, ) train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model)) sample_fn = construct_sampling_fn(model, self.sampler) # Resume training when intermediate checkpoints are detected if self.training_config.resume_training: 
pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None: """Train the model with optional evaluation and logging. This method encapsulates the entire training process including initialization, training loop, checkpointing, evaluation, and logging. It supports different sampling types like colorization, inpainting, super resolution, and deblurring. Args: model (linen.Module): The model to be trained. ds_train (BaseDataset): The training dataset. 
sde (SDE): Stochastic differential equation object, governing the dynamics for sampling. Raises: ValueError: If an unsupported dataset type is provided. Note: The method leverages the Weights & Biases (wandb) platform for logging and checkpointing, make sure it's configured properly if logging is enabled. """ run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run( model, ds_train, sde ) # `state.step` is JAX integer on the GPU/TPU devices start_step = int(state.step) rng = state.rng # Replicate the train state on all devices ( p_params, p_opt_state_params, p_step, p_ema_params, p_batch_input, ) = flax_utils.replicate( ( state.params, state.opt_state_params, state.step, state.ema_params, batch_input, ) ) # update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." 
% (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb:
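One detail worth noting in the `train_step` method shown above: after every optimizer update, a shadow copy of the parameters is blended toward the new values with `jax.tree_map`, and sampling and evaluation read from `state.ema_params` rather than the raw weights. A minimal sketch of that exponential moving average, with a stand-in value for `training_config.ema_rate`:

import jax
import jax.numpy as jnp

def ema_update(ema_params, new_params, rate=0.999):  # rate stands in for training_config.ema_rate
    # Each EMA leaf moves a small step toward the freshly updated parameter.
    return jax.tree_map(lambda e, p: e * rate + p * (1.0 - rate), ema_params, new_params)

ema = {"w": jnp.zeros(3)}
new = {"w": jnp.ones(3)}
ema = ema_update(ema, new)  # every entry of ema["w"] is now 0.001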
if isinstance(ds_train, ImageDataset):
1
2023-10-24 22:01:35+00:00
24k
violet-sto/HN-GFN
main_mobo.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.test_mols = []\n self.all_mols = []\n self.train_mols_map = {}\n\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.proxy_repr_type, include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = torch.double\n else:\n self.mdp.floatX = torch.float\n self.mdp._cue_max_blocks = args.max_blocks\n self.max_blocks = args.max_blocks\n self.oracle = oracle\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n\n self.target_norm = [-8.6, 1.10] # for dockerscore\n\n self.hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n\n def load_h5(self, path, test_ratio=0.1, num_init_examples=None):\n import json\n columns = [\"smiles\", \"dockscore\",\"blockidxs\", \"slices\", \"jbonds\", \"stems\"]\n store = pd.HDFStore(path, 'r')\n df = store.select('df')\n # Pandas has problem with calculating some stuff on float16\n df.dockscore = df.dockscore.astype(\"float64\")\n for cl_mame in columns[2:]:\n df.loc[:, cl_mame] = df[cl_mame].apply(json.loads)\n\n test_idxs = self.test_split_rng.choice(\n len(df), int(test_ratio * len(df)), replace=False)\n\n split_bool = np.zeros(len(df), dtype=np.bool)\n split_bool[test_idxs] = True\n self.scores = []\n self.smis = []\n for i in tqdm(range(len(df))):\n m = BlockMoleculeDataExtended()\n for c in range(1, len(columns)):\n setattr(m, columns[c], df.iloc[i, c - 1])\n m.blocks = [self.mdp.block_mols[i] for i in m.blockidxs]\n if len(m.blocks) > self.max_blocks:\n continue\n m.numblocks = len(m.blocks)\n m.score = self.oracle.get_score([m])\n self.scores.append(m.score)\n self.smis.append(m.smiles)\n self.all_mols.append(m)\n if split_bool[i]: \n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n if len(self.train_mols)+len(self.test_mols) >= num_init_examples:\n break\n store.close()\n\n print(\"Sampling initial {} molecules from all {} molecules...\".format(\n num_init_examples, len(split_bool)))\n print(len(self.train_mols), 'train mols')\n print(len(self.test_mols), 'test mols')\n\n def r2r(self, dockscore=None, normscore=None):\n if dockscore is not None:\n normscore = 4-(min(0, dockscore) -\n self.target_norm[0])/self.target_norm[1]\n normscore = max(0.1, normscore)\n return (normscore/1) ** 1\n\n def _get(self, i, dset):\n return [(dset[i], dset[i].score)]\n\n def sample(self, n):\n eidx = np.random.randint(0, len(self.train_mols), n)\n samples = sum((self._get(i, self.train_mols) for i in eidx), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n s, r = mb\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n r = torch.tensor(pd.DataFrame.from_dict(\n r).values, device=self._device).float()\n return (s, r)\n\n def iterset(self, n, mode):\n if mode == 'test':\n dset = self.test_mols\n elif mode == 'train':\n dset = self.train_mols\n\n N = len(dset)\n for i in range(int(np.ceil(N/n))):\n samples = sum((self._get(j, dset)\n for j in range(i*n, min(N, (i+1)*n))), [])\n yield self.sample2batch(zip(*samples))\n\n def add_samples(self, batch):\n picked_mols, scores, picked_smis = batch\n\n for m in picked_mols:\n if np.random.uniform() < (1/10):\n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n 
self.all_mols.append(m)\n \n self.scores += scores\n self.smis += [smis[-1] for smis in picked_smis]\n \n self.stop_event.clear()\n\n def compute_hypervolume(self):\n scores = torch.tensor(pd.DataFrame.from_dict(self.scores).values)\n volume = self.hypervolume.compute(scores)\n\n return volume\n \n def start_samplers(self, n, mbsize):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(self.sample(mbsize))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. 
We can add those to the table.\n        for blockidx in range(len(self.block_mols)):\n            for j in self.block_rs[blockidx]:\n                if j not in self.translation_table[blockidx]:\n                    symmetric_duplicate = None\n                    for atom, block_duplicate in self.translation_table[blockidx].items():\n                        molA, _ = chem.mol_from_frag(\n                            jun_bonds=[[0,1,0,j]],\n                            frags=[gold, self.block_mols[blockidx]])\n                        molB, _ = chem.mol_from_frag(\n                            jun_bonds=[[0,1,0,atom]],\n                            frags=[gold, self.block_mols[blockidx]])\n                        if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n                            molA.HasSubstructMatch(molB)):\n                            symmetric_duplicate = block_duplicate\n                            break\n                    if symmetric_duplicate is None:\n                        raise ValueError('block', blockidx, self.block_smi[blockidx],\n                                         'has no duplicate for atom', j,\n                                         'in position 0, and no symmetrical correspondence')\n                    self.translation_table[blockidx][j] = symmetric_duplicate\n                    #print('block', blockidx, '+ atom', j,\n                    #      'in position 0 is a symmetric duplicate of',\n                    #      symmetric_duplicate)\n\n    def parents(self, mol=None):\n        \"\"\"returns all the possible parents of molecule mol (or the current\n        molecule if mol is None).\n\n        Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n        for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n        \"\"\"\n        if len(mol.blockidxs) == 1:\n            # If there's just a single block, then the only parent is\n            # the empty block with the action that recreates that block\n            return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n        # Compute how many blocks each block is connected to\n        blocks_degree = defaultdict(int)\n        for a,b,_,_ in mol.jbonds:\n            blocks_degree[a] += 1\n            blocks_degree[b] += 1\n        # Keep only blocks of degree 1 (those are the ones that could\n        # have just been added)\n        blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n        # Form new molecules without these blocks\n        parent_mols = []\n\n        for rblockidx in blocks_degree_1:\n            new_mol = mol.copy()\n            # find which bond we're removing\n            removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n                             if rblockidx in bond[:2]]\n            assert len(removed_bonds) == 1\n            rjbidx, rbond = removed_bonds[0]\n            # Pop the bond\n            new_mol.jbonds.pop(rjbidx)\n            # Remove the block\n            mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n            mask[rblockidx] = 0\n            reindex = new_mol.delete_blocks(mask)\n            # reindex maps old blockidx to new blockidx, since the\n            # block the removed block was attached to might have its\n            # index shifted by 1.\n\n            # Compute which stem the bond was using\n            stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n                    [reindex[rbond[1]], rbond[3]])\n            # and add it back\n            new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n            #new_mol.stems.append(stem)\n            # and we have a parent. 
The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n 
for jbond in mol.jbonds:\n                edges.append((jbond[0], jbond[1],\n                              {\"bond\": [jbond[2], jbond[3]]}))\n                edges.append((jbond[1], jbond[0],\n                              {\"bond\": [jbond[3], jbond[2]]}))\n            G.add_edges_from(edges)\n        return G\n\n    def graphs_are_isomorphic(self, g1, g2):\n        return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "BlockMoleculeDataExtended", "path": "mol_mdp_ext.py", "snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n    @property\n    def mol(self):\n        return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n    @property\n    def smiles(self):\n        return Chem.MolToSmiles(self.mol)\n\n    def copy(self): # shallow copy\n        o = BlockMoleculeDataExtended()\n        o.blockidxs = list(self.blockidxs)\n        o.blocks = list(self.blocks)\n        o.slices = list(self.slices)\n        o.numblocks = self.numblocks\n        o.jbonds = list(self.jbonds)\n        o.stems = list(self.stems)\n        return o\n\n    def as_dict(self):\n        return {'blockidxs': self.blockidxs,\n                'slices': self.slices,\n                'numblocks': self.numblocks,\n                'jbonds': self.jbonds,\n                'stems': self.stems}" }, { "identifier": "Oracle", "path": "oracle/oracle.py", "snippet": "class Oracle():\n    def __init__(self, args, mols_ref=None):\n        '''\n        @params:\n            args (dict): configurations\n        '''\n        self.objectives = args.objectives\n        self.fps_ref = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) \n            for x in mols_ref] if mols_ref else None\n        self.device = torch.device(args.device)\n\n    def batch_get_scores(self, mols):\n        '''\n        @params:\n            mols: molecules to estimate score\n        @return:\n            dicts (list): list of score dictionaries\n        '''\n        dicts = [{} for _ in mols]\n        for obj in self.objectives:\n            scores = get_scores(obj, mols, device=self.device)\n            for i, mol in enumerate(mols):\n                dicts[i][obj] = scores[i]\n        return dicts\n    \n    def get_score(self, mol):\n        scores = {}\n        for obj in self.objectives:\n            score = get_scores(obj, mol, device=self.device)\n            scores[obj] = score[0]\n        \n        return scores" }, { "identifier": "get_proxy", "path": "proxy/proxy.py", "snippet": "def get_proxy(args, bpath, oracle):\n    if args.acq_fn.lower() == 'none':\n        return NoAF(args, bpath, oracle)\n\n    elif args.acq_fn.lower() == 'ucb':\n        return UCB(args, bpath, oracle)\n    \n    elif args.acq_fn.lower() == 'ucb_chebyshev':\n        return UCB_chebyshev(args, bpath, oracle)\n\n    elif args.acq_fn.lower() == 'ei':\n        return EI(args, bpath, oracle)" }, { "identifier": "FMGFlowNet", "path": "generator/gfn.py", "snippet": "class FMGFlowNet(nn.Module):\n    def __init__(self, args, bpath):\n        super().__init__()\n        self.args = args\n        mdp = MolMDPExtended(bpath)\n        mdp.post_init(args.device, args.repr_type,\n                      include_nblocks=args.include_nblocks)\n        mdp.build_translation_table()\n        self.model = make_model(args, mdp, is_proxy=False)\n        self.opt = torch.optim.Adam(self.model.parameters(\n        ), args.learning_rate, weight_decay=args.weight_decay)\n\n        self.loginf = 1000  # to prevent nans\n        self.log_reg_c = args.log_reg_c\n        self.balanced_loss = args.balanced_loss\n        self.do_nblocks_reg = False\n        self.max_blocks = args.max_blocks\n        self.leaf_coef = args.leaf_coef\n        self.clip_grad = args.clip_grad\n        # self.score_criterion = nn.MSELoss(reduction='none')\n        self.score_criterion = nn.MSELoss()\n\n    def forward(self, graph_data, vec_data=None, do_stems=True):\n        return self.model(graph_data, vec_data, do_stems)\n\n    def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n        loss, term_loss, flow_loss = self.FMLoss(p, pb, a, pw, w, r, s, d)\n\n        self.opt.zero_grad()\n        loss.backward()\n        if self.clip_grad > 0:\n            
torch.nn.utils.clip_grad_norm_(\n                self.model.parameters(), self.clip_grad)\n        self.opt.step()\n        self.model.training_steps = i+1\n        \n        return (loss.item(), term_loss.item(), flow_loss.item())\n\n    def FMLoss(self, p, pb, a, pw, w, r, s, d):\n        # Since we sampled 'mbsize' trajectories, we're going to get\n        # roughly mbsize * H (H is variable) transitions\n        ntransitions = r.shape[0]\n        # state outputs\n        stem_out_s, mol_out_s = self.model(s, w)  # log(F)\n        # parents of the state outputs\n        stem_out_p, mol_out_p = self.model(p, pw)\n        # index parents by their corresponding actions\n        qsa_p = self.model.index_output_by_action(\n            p, stem_out_p, mol_out_p[:, 0], a)\n        # then sum the parents' contribution, this is the inflow\n        exp_inflow = (torch.zeros((ntransitions,), device=qsa_p.device, dtype=qsa_p.dtype)\n                      .index_add_(0, pb, torch.exp(qsa_p)))  # pb is the parents' batch index\n        inflow = torch.log(exp_inflow + self.log_reg_c)\n        # sum the state's Q(s,a), this is the outflow\n        exp_outflow = self.model.sum_output(s, torch.exp(\n            stem_out_s), torch.exp(mol_out_s[:, 0]))\n        # include reward and done multiplier, then take the log\n        # we're guaranteed that r > 0 iff d = 1, so the log always works\n        outflow_plus_r = torch.log(self.log_reg_c + r + exp_outflow * (1-d))\n        if self.do_nblocks_reg:\n            losses = _losses = ((inflow - outflow_plus_r) /\n                                (s.nblocks * self.max_blocks)).pow(2)\n        else:\n            losses = _losses = (inflow - outflow_plus_r).pow(2)\n\n        term_loss = (losses * d).sum() / (d.sum() + 1e-20)  # terminal nodes\n        flow_loss = (losses * (1-d)).sum() / \\\n            ((1-d).sum() + 1e-20)  # non-terminal nodes\n        \n        if self.balanced_loss:\n            loss = term_loss * self.leaf_coef + flow_loss\n        else:\n            loss = losses.mean()\n\n        return loss, term_loss, flow_loss" }, { "identifier": "TBGFlowNet", "path": "generator/gfn.py", "snippet": "class TBGFlowNet(nn.Module):\n    def __init__(self, args, bpath):\n        super().__init__()\n        self.args = args\n        self.mdp = MolMDPExtended(bpath)\n        self.mdp.post_init(args.device, args.repr_type,\n                           include_nblocks=args.include_nblocks)\n        self.mdp.build_translation_table()\n        self.model = make_model(args, self.mdp, is_proxy=False)\n        self.Z = nn.Sequential(nn.Linear(len(args.objectives), args.nemb//2), nn.LeakyReLU(),\n                               nn.Linear(args.nemb//2, 1))\n        self.Z.to(args.device)\n        self.opt = torch.optim.Adam(self.model.parameters(), args.learning_rate, weight_decay=args.weight_decay)\n        self.opt_Z = torch.optim.Adam(self.Z.parameters(), args.Z_learning_rate, weight_decay=args.weight_decay)\n\n    def forward(self, graph_data, vec_data=None, do_stems=True):\n        return self.model(graph_data, vec_data, do_stems)\n\n    def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n        loss = self.TBLoss(p, a, w, r, d, mols)\n        self.opt.zero_grad()\n        self.opt_Z.zero_grad()\n        loss.backward()\n        if self.args.clip_grad > 0:\n            torch.nn.utils.clip_grad_norm_(\n                self.model.parameters(), self.args.clip_grad)\n        self.opt.step()\n        self.opt_Z.step()\n\n        return (loss.item(),)\n\n    @property\n    def Z(self):\n        return self.model.Z\n\n    def TBLoss(self, p, a, w, r, d, mols):\n        # logit\n        stem_out_p, mol_out_p = self.model(p, w)\n        # index parents by their corresponding actions\n        logits = -self.model.action_negloglikelihood(\n            p, a, stem_out_p, mol_out_p)\n\n        b = torch.cat([torch.tensor([0], device=logits.device),\n                       torch.cumsum(d.long(), 0)[:-1]], dim=0)\n        n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n                          for idx, mol in enumerate(mols[1])], device=logits.device)\n        # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], 
device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n backward_ll = scatter(torch.log(1/n), b, reduce='sum')\n\n losses = ((self.Z(w[d==1.]) + forward_ll) - (torch.log(r[d == 1.]) + backward_ll)).pow(2) \n loss = losses.mean()\n\n return loss" }, { "identifier": "MOReinforce", "path": "generator/gfn.py", "snippet": "class MOReinforce(TBGFlowNet):\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n\n rewards = r[d == 1.]\n losses = forward_ll * (-rewards - (-1) * rewards.mean())\n loss = losses.mean()\n\n return loss" }, { "identifier": "set_random_seed", "path": "utils/utils.py", "snippet": "def set_random_seed(seed, deterministic=True):\n \"\"\"Set random seed.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "compute_success", "path": "utils/metrics.py", "snippet": "def compute_success(mols, scores, objectives, score_succ):\n print(\"Computing successful rate...\")\n positive_mols = []\n success_dict = {k: 0. for k in objectives}\n\n for mol, score in zip(mols, scores):\n all_success = True\n for k, v in score.items():\n if v >= score_succ[k]:\n success_dict[k] += 1\n else:\n all_success = False\n if all_success:\n positive_mols.append(mol)\n\n success = 1.*len(positive_mols)/len(mols)\n\n return success, positive_mols" }, { "identifier": "compute_diversity", "path": "utils/metrics.py", "snippet": "def compute_diversity(mols):\n print(\"Computing diversity...\")\n\n if len(mols) == 0:\n return 0\n\n sims = []\n fps = [AllChem.GetMorganFingerprintAsBitVect(x.mol, 3, 2048) for x in mols]\n for i in range(len(fps)):\n sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])\n\n return 1 - np.mean(sims)" }, { "identifier": "compute_novelty", "path": "utils/metrics.py", "snippet": "def compute_novelty(mols, ref_mols):\n print(\"Computing novelty...\")\n positive_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x.mol, 3, 2048) for x in mols]\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x, 3, 2048) for x in ref_mols]\n\n n_sim = 0.\n for i in range(len(positive_fps)):\n sims = DataStructs.BulkTanimotoSimilarity(positive_fps[i], ref_fps)\n if max(sims) >= 0.4:\n n_sim += 1\n novelty = 1. - 1. 
* n_sim / (len(positive_fps)+1e-6)\n\n return novelty" }, { "identifier": "compute_correlation", "path": "utils/metrics.py", "snippet": "def compute_correlation(args, model, rollout_worker, test_mols):\n\n mdp = rollout_worker.mdp\n device = args.device\n def tf(x): return torch.tensor(x, device=device).to(torch.float)\n def tint(x): return torch.tensor(x, device=device).long()\n\n # test_mols = pickle.load(gzip.open('data/some_mols_U_1k.pkl.gz'))\n logsoftmax = nn.LogSoftmax(0)\n corrs = []\n numblocks = []\n\n start_time = time.time()\n if args.n_objectives == 3:\n test_weights = rollout_worker.test_weights[::2]\n elif args.n_objectives == 4:\n test_weights = rollout_worker.test_weights[1:-2:4]\n else:\n test_weights = rollout_worker.test_weights\n \n for weights in test_weights:\n print(\"Computing correlation w.r.t test weights {}\".format(weights))\n weights = torch.tensor(weights).to(args.device)\n logp = []\n rewards = []\n for m in tqdm(test_mols):\n try:\n agraph = get_mol_path_graph(m, mdp)\n except:\n continue\n # rewards.append(np.log(moli[0][0]))\n reward = rollout_worker._get_reward(m, weights)[0].item()\n rewards.append(np.log(reward))\n s = mdp.mols2batch([mdp.mol2repr(agraph.nodes[i]['mol'])\n for i in agraph.nodes])\n numblocks.append(len(m.blocks))\n with torch.no_grad():\n # get the mols_out_s for ALL molecules not just the end one.\n if args.condition_type == 'Hyper_scorepred':\n stem_out_s, mol_out_s, _ = model(\n s, weights.repeat(s.num_graphs, 1))\n else:\n stem_out_s, mol_out_s = model(\n s, weights.repeat(s.num_graphs, 1))\n per_mol_out = []\n # Compute pi(a|s)\n for j in range(len(agraph.nodes)):\n a, b = s._slice_dict['stems'][j:j+2]\n\n stop_allowed = len(\n agraph.nodes[j]['mol'].blocks) >= args.min_blocks\n mp = logsoftmax(torch.cat([\n stem_out_s[a:b].reshape(-1),\n # If num_blocks < min_blocks, the model is not allowed to stop\n mol_out_s[j, :1] if stop_allowed else tf([-1000])]))\n per_mol_out.append(\n (mp[:-1].reshape((-1, stem_out_s.shape[1])), mp[-1]))\n\n # When the model reaches 8 blocks, it is stopped automatically. 
If instead it stops before\n # that, we need to take into account the STOP action's logprob\n if len(m.blocks) < 8:\n if args.condition_type == 'Hyper_scorepred':\n stem_out_last, mol_out_last, _ = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0))\n else:\n stem_out_last, mol_out_last = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0)) \n mplast = logsoftmax(\n torch.cat([stem_out_last.reshape(-1), mol_out_last[0, :1]]))\n MSTOP = mplast[-1]\n\n # assign logprob to edges\n for u, v in agraph.edges:\n a = agraph.edges[u, v]['action']\n if a[0] == -1:\n agraph.edges[u, v]['logprob'] = per_mol_out[v][1]\n else:\n agraph.edges[u,\n v]['logprob'] = per_mol_out[v][0][a[1], a[0]]\n\n # propagate logprobs through the graph\n for n in list(nx.topological_sort(agraph))[::-1]:\n for c in agraph.predecessors(n):\n if len(m.blocks) < 8 and c == 0:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0) + MSTOP)\n else:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0))\n\n # add the first item\n # logp.append((moli, agraph.nodes[n]['logprob'].item()))\n logp.append(agraph.nodes[n]['logprob'].item())\n corrs.append(stats.spearmanr(rewards, logp).correlation)\n\n print('Spearmanr: {}, mean: {}, Time: {}'.format(corrs, np.mean(corrs), time.time()-start_time))\n return corrs" }, { "identifier": "circle_points", "path": "utils/metrics.py", "snippet": "def circle_points(K, min_angle=None, max_angle=None):\n # generate evenly distributed preference vector\n ang0 = 1e-6 if min_angle is None else min_angle\n ang1 = np.pi / 2 - ang0 if max_angle is None else max_angle\n angles = np.linspace(ang0, ang1, K, endpoint=True)\n x = np.cos(angles)\n y = np.sin(angles)\n weights = np.c_[x, y]\n normalized_weights = weights/weights.sum(1, keepdims=True)\n\n return normalized_weights.astype(np.float32)" }, { "identifier": "get_logger", "path": "utils/logging.py", "snippet": "def get_logger(args):\n if args.enable_tensorboard:\n return TensorboardLogger(args)\n else:\n return Logger(args)" }, { "identifier": "RolloutWorker", "path": "main.py", "snippet": "class RolloutWorker:\n def __init__(self, args, bpath, proxy, device):\n self.args = args\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = self.floatX = torch.double\n else:\n self.mdp.floatX = self.floatX = torch.float\n self.proxy = proxy\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n #######\n # This is the \"result\", here a list of (reward, BlockMolDataExt, info...) 
tuples\n self.sampled_mols = []\n self.online_mols = []\n self.hindsight_mols = []\n self.max_online_mols = 1000\n self.max_hindsight_mols = 1000\n\n self.min_blocks = args.min_blocks\n self.max_blocks = args.max_blocks\n self.mdp._cue_max_blocks = self.max_blocks\n self.reward_exp = args.reward_exp\n self.reward_min = args.reward_min\n self.reward_norm = args.reward_norm\n self.reward_exp_ramping = args.reward_exp_ramping\n self.random_action_prob = args.random_action_prob\n\n # If True this basically implements Buesing et al's TreeSample Q,\n # samples uniformly from it though, no MTCS involved\n if args.criterion == 'TB' or args.criterion == \"Reinforce\":\n self.ignore_parents = True\n elif args.criterion == 'FM':\n self.ignore_parents = False\n\n def rollout(self, generator, use_rand_policy=True, weights=None, replay=False):\n weights = Dirichlet(torch.ones(len(self.args.objectives))*self.args.alpha).sample_n(1).to(\n self.args.device) if weights is None else weights\n\n m = BlockMoleculeDataExtended()\n samples = []\n max_blocks = self.max_blocks\n trajectory_stats = []\n for t in range(max_blocks):\n s = self.mdp.mols2batch([self.mdp.mol2repr(m)])\n s_o, m_o = generator(s, vec_data=weights, do_stems=True)\n # fix from run 330 onwards\n if t < self.min_blocks:\n m_o = m_o*0 - 1000 # prevent assigning prob to stop\n # when we can't stop\n ##\n logits = torch.cat([m_o.reshape(-1), s_o.reshape(-1)])\n cat = torch.distributions.Categorical(\n logits=logits) \n action = cat.sample().item()\n\n if use_rand_policy and self.random_action_prob > 0: # just for training\n if self.train_rng.uniform() < self.random_action_prob:\n action = self.train_rng.randint(\n int(t < self.min_blocks), logits.shape[0])\n\n q = torch.cat([m_o.reshape(-1), s_o.reshape(-1)])\n trajectory_stats.append(\n (q[action].item(), action, torch.logsumexp(q, 0).item()))\n\n if t >= self.min_blocks and action == 0:\n r, raw_r = self._get_reward(m, weights) # r: reward, raw_r: scores for the objectives\n samples.append(((m,), ((-1, 0),), weights, weights, r, m, 1))\n break\n else:\n action = max(0, action-1)\n action = (action % self.mdp.num_blocks,\n action // self.mdp.num_blocks)\n m_old = m\n m = self.mdp.add_block_to(m, *action)\n if len(m.blocks) and not len(m.stems) or t == max_blocks - 1:\n # can't add anything more to this mol so let's make it\n # terminal. 
Note that this node's parent isn't just m,\n # because this is a sink for all parent transitions\n r, raw_r = self._get_reward(m, weights)\n if self.ignore_parents:\n samples.append(\n ((m_old,), (action,), weights, weights, r, m, 1))\n else:\n parents, actions = zip(*self.mdp.parents(m))\n samples.append((parents, actions, weights.repeat(\n len(parents), 1), weights, r, m, 1))\n break\n else:\n if self.ignore_parents:\n samples.append(\n ((m_old,), (action,), weights, weights, 0, m, 0))\n else:\n parents, actions = zip(*self.mdp.parents(m))\n samples.append(\n (parents, actions, weights.repeat(len(parents), 1), weights, 0, m, 0))\n\n p = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in samples[-1][0]])\n qp = generator(p, weights.repeat(p.num_graphs, 1))\n qsa_p = generator.model.index_output_by_action(\n p, qp[0], qp[1][:, 0],\n torch.tensor(samples[-1][1], device=self._device).long())\n inflow = torch.logsumexp(qsa_p.flatten(), 0).item()\n self.sampled_mols.append(\n ([i.cpu().numpy() for i in raw_r], weights.cpu().numpy(), m, trajectory_stats, inflow))\n\n if replay and self.args.hindsight_prob > 0.0:\n self._add_mol_to_replay(m)\n\n return samples\n\n def _get_reward(self, m, weights=None):\n rdmol = m.mol\n if rdmol is None:\n return self.reward_min\n \n # get scores from oracle\n score = self.proxy.get_score([m])\n score = torch.tensor(list(score.values())).to(self.args.device)\n \n if self.args.scalar == 'WeightedSum':\n raw_reward = (weights*score).sum()\n \n elif self.args.scalar == 'Tchebycheff':\n raw_reward = (weights*score).min() + 0.1 * (weights*score).sum()\n \n reward = self.l2r(raw_reward.clip(self.reward_min))\n return reward, (raw_reward, score)\n\n def execute_train_episode_batch(self, generator, dataset=None, use_rand_policy=True):\n if self.args.condition_type is None:\n weights = self.test_weights # train specific model\n else:\n weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better\n samples = sum((self.rollout(generator, use_rand_policy, weights)\n for i in range(self.args.trajectories_mbsize)), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n p, a, p_weights, weights, r, s, d, *o = mb\n mols = (p, s)\n # The batch index of each parent\n p_batch = torch.tensor(sum([[i]*len(p) for i, p in enumerate(p)], []),\n device=self._device).long()\n # Convert all parents and states to repr. 
Note that this\n # concatenates all the parent lists, which is why we need\n # p_batch\n p = self.mdp.mols2batch(list(map(self.mdp.mol2repr, sum(p, ()))))\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n # Concatenate all the actions (one per parent per sample)\n a = torch.tensor(sum(a, ()), device=self._device).long()\n # rewards and dones\n r = torch.tensor(r, device=self._device).to(self.floatX)\n d = torch.tensor(d, device=self._device).to(self.floatX)\n # weights\n p_w = torch.cat(p_weights, 0)\n w = torch.cat(weights, 0)\n return (p, p_batch, a, p_w, w, r, s, d, mols, *o)\n\n def l2r(self, raw_reward, t=0):\n if self.reward_exp_ramping > 0:\n reward_exp = 1 + (self.reward_exp - 1) * \\\n (1 - 1/(1 + t / self.reward_exp_ramping))\n # when t=0, exp = 1; t->∞, exp = self.reward_exp\n else:\n reward_exp = self.reward_exp\n\n reward = (raw_reward/self.reward_norm)**reward_exp\n\n return reward\n\n def start_samplers(self, generator, n, dataset):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(\n self.execute_train_episode_batch(generator, dataset, use_rand_policy=True))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n\n self.sampler_threads = [threading.Thread(\n target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "get_test_mols", "path": "main.py", "snippet": "def get_test_mols(args, mdp, num):\n samples = []\n fps = []\n early_stops = []\n while len(samples) < num:\n if len(samples) % 5000 == 0:\n print(f'{len(samples)}/{num} mols have been sampled')\n m = BlockMoleculeDataExtended()\n min_blocks = args.min_blocks\n max_blocks = args.max_blocks\n early_stop_at = np.random.randint(min_blocks, max_blocks + 1)\n early_stops.append(early_stop_at)\n for t in range(max_blocks):\n if t == 0:\n length = mdp.num_blocks+1\n else:\n length = len(m.stems)*mdp.num_blocks+1\n\n action = np.random.randint(1, length)\n\n if t == early_stop_at:\n action = 0\n\n if t >= min_blocks and action == 0:\n fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048)\n if len(samples)==0:\n samples.append(m)\n fps.append(fp)\n else:\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n if max(sims) < 0.7:\n samples.append(m)\n fps.append(fp)\n break\n else:\n action = max(0, action-1)\n action = (action % mdp.num_blocks, action // mdp.num_blocks)\n #print('..', action)\n m = mdp.add_block_to(m, *action)\n if len(m.blocks) and not len(m.stems) or t == max_blocks - 1:\n # 
can't add anything more to this mol so let's make it\n # terminal. Note that this node's parent isn't just m,\n # because this is a sink for all parent transitions\n fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048)\n if len(samples)==0:\n samples.append(m)\n fps.append(fp)\n else:\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n if max(sims) < 0.7:\n samples.append(m)\n fps.append(fp)\n break\n \n return samples" } ]
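The `_get_reward` method in the RolloutWorker context snippet above scalarizes the multi-objective oracle scores with either a weighted sum or an augmented Tchebycheff term. A minimal, self-contained sketch of that scalarization, assuming only torch; the example scores and weights are illustrative:

import torch

def scalarize(scores: torch.Tensor, weights: torch.Tensor, kind: str = 'WeightedSum') -> torch.Tensor:
    # Mirrors RolloutWorker._get_reward: collapse per-objective scores into one raw reward.
    if kind == 'WeightedSum':
        return (weights * scores).sum()
    if kind == 'Tchebycheff':
        # augmented Tchebycheff: worst weighted objective plus a small weighted-sum term
        return (weights * scores).min() + 0.1 * (weights * scores).sum()
    raise ValueError(f'unknown scalarization: {kind}')

scores = torch.tensor([0.7, 0.4, 0.9])    # illustrative oracle scores
weights = torch.tensor([0.5, 0.3, 0.2])   # illustrative preference weights
print(scalarize(scores, weights))                  # tensor(0.6500)
print(scalarize(scores, weights, 'Tchebycheff'))   # tensor(0.1850)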
from collections import defaultdict
from dataset import Dataset
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from oracle.oracle import Oracle
from proxy import get_proxy
from generator import TBGFlowNet, FMGFlowNet, MOReinforce
from utils.utils import set_random_seed
from utils.metrics import compute_success, compute_diversity, compute_novelty, compute_correlation, circle_points
from utils.logging import get_logger
from datetime import datetime
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import sample_simplex
from botorch.utils.transforms import normalize, unnormalize
from torch.distributions.dirichlet import Dirichlet
from main import RolloutWorker, get_test_mols
from pymoo.util.ref_dirs import get_reference_directions
from copy import deepcopy
import random
import os
import re
import argparse
import json
import time
import threading
import pdb
import pickle
import gzip
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch
import pandas as pd
import numpy as np
import warnings
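The rollout code in the context snippets above draws per-batch preference weights from a Dirichlet distribution via `Dirichlet(...).sample_n(1)`. A minimal sketch of the same draw, assuming torch; note that `sample_n` is deprecated and `.sample((1,))` is the equivalent modern spelling, and the concentration values below are the `arg_parse` default '1,1,1,1':

import torch
from torch.distributions.dirichlet import Dirichlet

alpha_vector = torch.tensor([1., 1., 1., 1.])  # one concentration per objective
alpha = 1.0
# Same draw as Dirichlet(...).sample_n(1) in execute_train_episode_batch;
# .sample((1,)) is the non-deprecated equivalent.
weights = Dirichlet(alpha_vector * alpha).sample((1,))
print(weights.shape, weights.sum())  # torch.Size([1, 4]); each row sums to 1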
15576
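`get_test_mols` in the context above deduplicates sampled molecules by rejecting any candidate whose maximum Tanimoto similarity to the already-accepted set is 0.7 or higher. A minimal standalone sketch of that filter, assuming RDKit is installed; the SMILES are illustrative:

from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

def filter_diverse(mols, sim_threshold=0.7, radius=3, nbits=2048):
    # Keep a molecule only if its max Tanimoto similarity to the kept set
    # stays below the threshold, as in get_test_mols.
    kept, fps = [], []
    for mol in mols:
        fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nbits)
        if not fps or max(DataStructs.BulkTanimotoSimilarity(fp, fps)) < sim_threshold:
            kept.append(mol)
            fps.append(fp)
    return kept

mols = [Chem.MolFromSmiles(s) for s in ('CCO', 'CCCO', 'c1ccccc1')]  # illustrative
print(len(filter_diverse(mols)))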
    picked_mols = []
    smis = []
    for i, weights in enumerate(rollout_worker.test_weights):
        sampled_mols = []
        sampled_raw_rewards = []
        sampled_means = []
        sampled_smis = []
        while len(sampled_mols) < args.num_samples:
            rollout_worker.rollout(generator, use_rand_policy=False,
                                   weights=torch.tensor(weights).unsqueeze(0).to(args.device))
            (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1]
            sampled_mols.append(m)
            sampled_raw_rewards.append(raw_r[0].item())
            sampled_means.append(raw_r[1])
            sampled_smis.append(m.smiles)
        idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples / len(rollout_worker.test_weights))]
        picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist())
        means.extend(np.array(sampled_means)[idx_pick].tolist())
        smis.extend(np.array(sampled_smis)[idx_pick].tolist())
        raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist())
        raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean()
    raw_rewards_mean = np.mean(list(raw_rewards_weight.values()))

    assert len(picked_mols) == args.num_samples
    top_means = torch.tensor(means)
    scores_dict = oracle.batch_get_scores(picked_mols)
    scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values)
    test_loss = F.mse_loss(top_means, scores)

    hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))
    volume = hypervolume.compute(top_means)
    volume_oracle = hypervolume.compute(scores)
    diversity = compute_diversity(picked_mols)

    batch_metrics = {'Hypervolume_reward': volume,
                     'Hypervolume_oracle': volume_oracle,
                     'Reward_mean': raw_rewards_mean,
                     'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(),
                     'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(),
                     'Test_loss': test_loss,
                     'Diversity': diversity}
    print(batch_metrics)
    print('Time: {}'.format(time.time() - time_start))

    if not compute_multi_objective_metric:
        return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity
    else:
        for i in range(len(picked_mols)):
            picked_mols[i].score = scores_dict[i]
        # success/diversity/novelty is computed among the top mols.
        success, positive_mols = compute_success(picked_mols, scores_dict, args.objectives, score_succ)
        succ_diversity = compute_diversity(positive_mols)
        if ref_mols:
            novelty = compute_novelty(positive_mols, ref_mols)
        else:
            novelty = 1.
        mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity}
        picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))]
        print(mo_metrics)
        return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics


def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None):
    volume = dataset.compute_hypervolume()
    print("Hypervolume for {}: {}".format(args.logger.context, volume))
    args.logger.add_scalar('Metric/hypervolume', volume, use_context=False)
    args.logger.add_object('scores', dataset.scores)
    args.logger.add_object('smis', dataset.smis)
    if batch_infos:
        args.logger.add_scalar('Metric/test_loss', batch_infos['Test_loss'], use_context=False)
        args.logger.add_object('collected_info', batch_infos)
    if MultiObjective_metrics:
        args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False)


def get_test_rays():
    if args.n_objectives == 3:
        n_partitions = 6
    elif args.n_objectives == 4:
        n_partitions = 7
    test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32)
    test_rays = test_rays[[(r > 0).all() for r in test_rays]]
    print(f"initialize {len(test_rays)} test rays")
    return test_rays


def main(args):
    set_random_seed(args.seed)
    args.logger.set_context('iter_0')
    bpath = "./data/blocks_105.json"
    dpath = "./data/docked_mols.h5"

    # Initialize oracle and dataset (for training surrogate function)
    oracle = Oracle(args)
    dataset = Dataset(args, bpath, oracle, args.device)
    dataset.load_h5(dpath, num_init_examples=args.num_init_examples)
    log_overall_metrics(args, dataset)
    args.n_objectives = len(args.objectives)

    # Initialize surrogate function
    proxy = get_proxy(args, bpath, oracle)
    proxy.update(dataset, 0, reset=False)

    for i in range(1, args.num_outer_loop_iters + 1):
        print(f"====== Starting round {i} ======")
        args.logger.set_context('iter_{}'.format(i))
        test_weights = np.random.dirichlet(args.alpha_vector, 5 * (2 ** (args.n_objectives - 2))).astype(np.float32)
        if args.criterion == 'TB':
warnings.filterwarnings('ignore')


def arg_parse():
    parser = argparse.ArgumentParser()
    parser.add_argument("--device", type=str, default='cuda')
    parser.add_argument('--seed', type=int, default=42, help='seed')
    parser.add_argument("--run", default=0, help="run", type=int)
    parser.add_argument('--save', action='store_true', default=False, help='Save model.')
    parser.add_argument('--debug', action='store_true', default=False, help='debug mode, no multi thread')
    parser.add_argument("--enable_tensorboard", action='store_true', default=False)
    parser.add_argument("--log_dir", default='runs/mobo')
    parser.add_argument("--include_nblocks", default=False)
    parser.add_argument("--num_init_examples", default=200, type=int)
    parser.add_argument("--num_outer_loop_iters", default=8, type=int)
    parser.add_argument("--num_samples", default=100, type=int)
    parser.add_argument("--floatX", default='float32')
    parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics')
    parser.add_argument("--log_weight_score", action='store_true', default=False)
    # objectives
    parser.add_argument("--objectives", type=str, default='gsk3b,jnk3,qed,sa')
    parser.add_argument("--acq_fn", default='UCB', type=str)
    parser.add_argument("--beta", default=0.1, type=float)
    parser.add_argument("--scalar", default='WeightedSum', type=str)
    parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution')
    parser.add_argument("--alpha_vector", default='1,1,1,1', type=str)
    # Proxy
    parser.add_argument("--proxy_normalize", action='store_true', default=False, help='normalize Y')
    parser.add_argument("--proxy_num_iterations", default=10000, type=int)
    parser.add_argument("--proxy_learning_rate", default=2.5e-4, help="Learning rate", type=float)
    parser.add_argument("--proxy_mbsize", default=64, help="Minibatch size", type=int)
    parser.add_argument("--proxy_early_stop_tol", default=10, type=int)
    parser.add_argument("--proxy_repr_type", default='atom_graph')
    parser.add_argument("--proxy_model_version", default='v2')
    parser.add_argument("--proxy_num_conv_steps", default=12, type=int)
    parser.add_argument("--proxy_nemb", default=64, help="#hidden", type=int)
    parser.add_argument("--proxy_weight_decay", default=1e-6, help="Weight Decay in Proxy", type=float)
    parser.add_argument("--proxy_uncertainty", default="evidential", type=str)  # deep ensemble and GP
    parser.add_argument("--proxy_dropout", default=0.1, help="MC Dropout in Proxy", type=float)
    parser.add_argument("--proxy_num_dropout_samples", default=5, type=int)
    parser.add_argument("--evidential_lam", default=0.1, type=float)
    parser.add_argument("--fp_radius", type=int, default=2, help="Morgan fingerprint radius.")
    parser.add_argument("--fp_nbits", type=int, default=1024, help="Morgan fingerprint nBits.")
    # GFlowNet
    parser.add_argument("--min_blocks", default=2, type=int)
    parser.add_argument("--max_blocks", default=8, type=int)
    parser.add_argument("--num_iterations", default=5000, type=int)
    parser.add_argument("--criterion", default="FM", type=str)
    parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float)
    parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float)
    parser.add_argument("--clip_grad", default=0, type=float)
    parser.add_argument("--trajectories_mbsize", default=8, type=int)
    parser.add_argument("--offline_mbsize", default=8, type=int)
    parser.add_argument("--hindsight_prob", default=0.2, type=float)
    parser.add_argument("--hindsight_buffer_mbsize", default=8, type=int)
    parser.add_argument("--hindsight_trajectories_mbsize", default=8, type=int)
    parser.add_argument("--reward_min", default=1e-2, type=float)
    parser.add_argument("--reward_norm", default=1, type=float)
    parser.add_argument("--reward_exp", default=8, type=float)
    parser.add_argument("--reward_exp_ramping", default=0, type=float)
    parser.add_argument("--logit_clipping", default=0., type=float)
    # Hyperparameters for TB
    parser.add_argument("--partition_init", default=1, type=float)
    # Hyperparameters for FM
    parser.add_argument("--log_reg_c", default=(0.1/8)**4, type=float)
    parser.add_argument("--balanced_loss", default=True)
    parser.add_argument("--leaf_coef", default=10, type=float)
    # Architecture
    parser.add_argument("--repr_type", default='block_graph')
    parser.add_argument("--model_version", default='v4')
    parser.add_argument("--num_conv_steps", default=10, type=int)
    parser.add_argument("--nemb", default=256, help="#hidden", type=int)
    parser.add_argument("--weight_decay", default=0, type=float)
    parser.add_argument("--random_action_prob", default=0.05, type=float)
    parser.add_argument("--bootstrap_tau", default=0, type=float)
    parser.add_argument("--condition_type", type=str, default='HN')
    parser.add_argument("--ray_hidden_dim", default=100, type=int)
    return parser.parse_args()


class BoRolloutWorker(RolloutWorker):
    def __init__(self, args, bpath, proxy, device):
        super(BoRolloutWorker, self).__init__(args, bpath, proxy, device)
        self.hindsight_prob = args.hindsight_prob
        self.hindsight_mols = defaultdict(list)
        self.hindsight_smiles = defaultdict(list)
        self.replay_threshold = 0.9

    def _get(self, i, dset, weights=None):
        # Sample trajectories by walking backwards from the molecules in our dataset
        # Handle possible multithreading issues when independent threads
        # add/subtract from dset:
        m = dset[i]
        if not isinstance(m, BlockMoleculeDataExtended):
            m = m[-1]
        r, raw_r = self._get_reward(m, weights)
        done = 1
        samples = []
        # a sample is a tuple (parents(s), parent actions, reward(s), s, done)
        # an action is (blockidx, stemidx) or (-1, x) for 'stop'
        # so we start with the stop action, unless the molecule is already
        # a "terminal" node (if it has no stems, no actions).
        if len(m.stems) and len(m.blocks) < self.max_blocks:
            samples.append(((m,), ((-1, 0),), weights, weights, r, m, done))
            r = done = 0
        while len(m.blocks):  # and go backwards
            if self.ignore_parents:
                parents = self.mdp.parents(m)
                parent, action = parents[self.train_rng.randint(len(parents))]
                samples.append(((parent,), (action,), weights, weights, r, m, done))
                r = done = 0
                m = parent
            else:
                parents, actions = zip(*self.mdp.parents(m))
                samples.append((parents, actions, weights.repeat(len(parents), 1), weights, r, m, done))
                r = done = 0
                m = parents[self.train_rng.randint(len(parents))]
        return samples[::-1]

    def _add_mol_to_replay(self, m):
        for i, weights in enumerate(self.test_weights):
            r, raw_r = self._get_reward(m, weights)
            if len(self.hindsight_mols[i]) < self.max_hindsight_mols or raw_r[0] > self.hindsight_mols[i][0][0]:
                if m.smiles not in self.hindsight_smiles[i]:
                    self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m))
                    self.hindsight_smiles[i].append(m.smiles)
                    if len(self.hindsight_mols[i]) > self.max_hindsight_mols:
                        self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x: (x[0]))[
                            max(int(0.05 * self.max_hindsight_mols), 1):]
                        self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]]

    def _add_mol_to_online(self, r, m, inflow):
        if self.replay_mode == 'online':
            r = r + self.train_rng.normal() * 0.01
            if len(self.online_mols) < self.max_online_mols or r > self.online_mols[0][0]:
                self.online_mols.append((r, m))
            if len(self.online_mols) > self.max_online_mols:
                self.online_mols = sorted(self.online_mols)[max(int(0.05 * self.max_online_mols), 1):]
        elif self.replay_mode == 'prioritized':
            self.online_mols.append((abs(inflow - np.log(r)), m))
            if len(self.online_mols) > self.max_online_mols * 1.1:
                self.online_mols = self.online_mols[-self.max_online_mols:]

    def _get_reward(self, m, weights=None):
        rdmol = m.mol
        if rdmol is None:
            return self.reward_min
        # get reward from proxy
        raw_reward, score = self.proxy(m, weights)
        raw_reward = raw_reward.clip(self.reward_min)
        reward = self.l2r(raw_reward)
        return reward, (raw_reward, score)

    def execute_train_episode_batch(self, generator, dataset=None, Y_bounds=None, use_rand_policy=True):
        if self.train_rng.uniform() < self.hindsight_prob:
            idx = self.train_rng.randint(self.test_weights.shape[0])
            weights = self.test_weights[idx].unsqueeze(0)
            samples = sum((self.rollout(generator, use_rand_policy, weights)
                           for i in range(self.args.hindsight_trajectories_mbsize)), [])
            if self.args.hindsight_buffer_mbsize > 0:
                buffer = deepcopy(self.hindsight_mols[idx])
                reward = np.array([x[0] for x in buffer])
                prob = reward / sum(reward)
                eidx = np.random.choice(list(range(len(buffer))), self.args.hindsight_buffer_mbsize,
                                        replace=False, p=prob)
                offline_samples = sum((self._get(i, buffer, weights) for i in eidx), [])
                samples += offline_samples
        else:
            weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device)  #* sample weights per batch, seem better
            samples = sum((self.rollout(generator, use_rand_policy, weights, replay=True)
                           for i in range(self.args.trajectories_mbsize)), [])
            # offline sampling from dataset
            if self.args.offline_mbsize > 0 and dataset is not None:
                # use the oracle reward
                scores = torch.tensor(pd.DataFrame.from_dict(dataset.scores).values,
                                      dtype=torch.float32).to(args.device)
                if Y_bounds is not None:
                    scores = normalize(scores, Y_bounds)
                reward = torch.matmul(scores, weights.reshape(-1, 1))
                prob = (reward / sum(reward)).squeeze(1).cpu().numpy()
                eidx = np.random.choice(list(range(len(dataset.all_mols))),
                                        self.args.offline_mbsize, replace=False, p=prob)
                offline_samples = sum((self._get(i, dataset.all_mols, weights) for i in eidx), [])
                samples += offline_samples
        return zip(*samples)

    def initialize_hindsight_mols(self, dataset):
        for m in dataset.all_mols:
            for i, weights in enumerate(self.test_weights):
                r, raw_r = self._get_reward(m, weights)
                self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m))
        for i, weights in enumerate(self.test_weights):
            self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x: (x[0]))
            self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]]


def train_generative_model(args, generator, bpath, proxy, oracle, dataset, test_weights, round_idx, do_save=False):
    print("Training generator...")
    os.makedirs(os.path.join(args.log_dir, f'round_{round_idx}'), exist_ok=True)
    device = args.device
    rollout_worker = BoRolloutWorker(args, bpath, proxy, device)
    rollout_worker.test_weights = torch.tensor(test_weights).to(device)
    rollout_worker.initialize_hindsight_mols(dataset)
    Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values,
                            proxy.partitioning.Y.max(dim=-2).values])

    def save_stuff(round_idx, iter):
        torch.save(generator.state_dict(), os.path.join(
            args.log_dir, 'round_{}/{}_generator_checkpoint.pth'.format(round_idx, iter)))
        pickle.dump(rollout_worker.sampled_mols,
                    gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb'))

    multi_thread = not args.debug
    if multi_thread:
        sampler = rollout_worker.start_samplers(generator, 8, dataset)

    def stop_everything():
        print('joining')
        rollout_worker.stop_samplers_and_join()

    last_losses = []
    train_losses = []
    test_losses = []
    test_infos = []
    train_infos = []
    time_last_check = time.time()

    for i in range(args.num_iterations + 1):
        if multi_thread:
            r = sampler()
            for thread in rollout_worker.sampler_threads:
                if thread.failed:
                    stop_everything()
                    pdb.post_mortem(thread.exception.__traceback__)
                    return
            p, pb, a, pw, w, r, s, d, mols = r
        else:
            p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch(
                rollout_worker.execute_train_episode_batch(generator, dataset, Y_bounds, use_rand_policy=True))
        loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i)
        last_losses.append(loss)

        if not i % 100:
            train_loss = [np.round(np.mean(i), 3) for i in zip(*last_losses)]
            train_losses.append(train_loss)
            args.logger.add_scalar('Loss/round{}/train'.format(round_idx), train_loss[0], use_context=False)
            print('Iter {}: Loss {}, Time {}'.format(i, train_loss, round(time.time() - time_last_check, 3)))
            time_last_check = time.time()
            last_losses = []

        if not i % args.sample_iterations and i != 0:
            volume, volume_oracle, reward_weight, reward_mean, test_loss, diversity = sample_batch(
                args, generator, rollout_worker, oracle, proxy, Y_bounds, compute_multi_objective_metric=False)
            args.logger.add_scalar('round{}/Top-100-sampled/volumes'.format(round_idx), volume, use_context=False)
            args.logger.add_scalar('round{}/Top-100-sampled/volumes_oracle'.format(round_idx), volume_oracle, use_context=False)
            args.logger.add_scalars('round{}/Top-100-sampled/reward_weight'.format(round_idx), reward_weight, use_context=False)
            args.logger.add_scalar('round{}/Top-100-sampled/reward_mean'.format(round_idx), reward_mean, use_context=False)
            # reward_mean is a dict, the keys are test_weights
            args.logger.add_scalar('round{}/Top-100-sampled/test_loss'.format(round_idx), test_loss, use_context=False)
            args.logger.add_scalar('round{}/Top-100-sampled/dists'.format(round_idx), diversity, use_context=False)
            if do_save:
                save_stuff(round_idx, i)

    stop_everything()
    if do_save:
        save_stuff(round_idx, i)
    checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth')
    generator.load_state_dict(torch.load(checkpoint_path))
    return rollout_worker, {'train_losses': train_losses,
                            'test_losses': test_losses,
                            'test_infos': test_infos,
                            'train_infos': train_infos}


def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None,
                 compute_multi_objective_metric=False):
    score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5,
                  'chemprop_hiv': 0.5, 'seh': 0.5, 'qed': 0.6, 'sa': 0.67}
    if Y_bounds is None:
        Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values,
                                proxy.partitioning.Y.max(dim=-2).values])
    time_start = time.time()
    print(f"Sampling molecules...")
    raw_rewards = []
    raw_rewards_weight = {}
    means = []
    picked_mols = []
    smis = []
    for i, weights in enumerate(rollout_worker.test_weights):
        sampled_mols = []
        sampled_raw_rewards = []
        sampled_means = []
        sampled_smis = []
        while len(sampled_mols) < args.num_samples:
            rollout_worker.rollout(generator, use_rand_policy=False,
                                   weights=torch.tensor(weights).unsqueeze(0).to(args.device))
            (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1]
            sampled_mols.append(m)
            sampled_raw_rewards.append(raw_r[0].item())
            sampled_means.append(raw_r[1])
            sampled_smis.append(m.smiles)
        idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples / len(rollout_worker.test_weights))]
        picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist())
        means.extend(np.array(sampled_means)[idx_pick].tolist())
        smis.extend(np.array(sampled_smis)[idx_pick].tolist())
        raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist())
        raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean()
    raw_rewards_mean = np.mean(list(raw_rewards_weight.values()))

    assert len(picked_mols) == args.num_samples
    top_means = torch.tensor(means)
    scores_dict = oracle.batch_get_scores(picked_mols)
    scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values)
    test_loss = F.mse_loss(top_means, scores)

    hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))
    volume = hypervolume.compute(top_means)
    volume_oracle = hypervolume.compute(scores)
    diversity = compute_diversity(picked_mols)

    batch_metrics = {'Hypervolume_reward': volume,
                     'Hypervolume_oracle': volume_oracle,
                     'Reward_mean': raw_rewards_mean,
                     'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(),
                     'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(),
                     'Test_loss': test_loss,
                     'Diversity': diversity}
    print(batch_metrics)
    print('Time: {}'.format(time.time() - time_start))

    if not compute_multi_objective_metric:
        return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity
    else:
        for i in range(len(picked_mols)):
            picked_mols[i].score = scores_dict[i]
        # success/diversity/novelty is computed among the top mols.
        success, positive_mols = compute_success(picked_mols, scores_dict, args.objectives, score_succ)
        succ_diversity = compute_diversity(positive_mols)
        if ref_mols:
            novelty = compute_novelty(positive_mols, ref_mols)
        else:
            novelty = 1.
        mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity}
        picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))]
        print(mo_metrics)
        return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics


def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None):
    volume = dataset.compute_hypervolume()
    print("Hypervolume for {}: {}".format(args.logger.context, volume))
    args.logger.add_scalar('Metric/hypervolume', volume, use_context=False)
    args.logger.add_object('scores', dataset.scores)
    args.logger.add_object('smis', dataset.smis)
    if batch_infos:
        args.logger.add_scalar('Metric/test_loss', batch_infos['Test_loss'], use_context=False)
        args.logger.add_object('collected_info', batch_infos)
    if MultiObjective_metrics:
        args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False)


def get_test_rays():
    if args.n_objectives == 3:
        n_partitions = 6
    elif args.n_objectives == 4:
        n_partitions = 7
    test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32)
    test_rays = test_rays[[(r > 0).all() for r in test_rays]]
    print(f"initialize {len(test_rays)} test rays")
    return test_rays


def main(args):
    set_random_seed(args.seed)
    args.logger.set_context('iter_0')
    bpath = "./data/blocks_105.json"
    dpath = "./data/docked_mols.h5"

    # Initialize oracle and dataset (for training surrogate function)
    oracle = Oracle(args)
    dataset = Dataset(args, bpath, oracle, args.device)
    dataset.load_h5(dpath, num_init_examples=args.num_init_examples)
    log_overall_metrics(args, dataset)
    args.n_objectives = len(args.objectives)

    # Initialize surrogate function
    proxy = get_proxy(args, bpath, oracle)
    proxy.update(dataset, 0, reset=False)

    for i in range(1, args.num_outer_loop_iters + 1):
        print(f"====== Starting round {i} ======")
        args.logger.set_context('iter_{}'.format(i))
        test_weights = np.random.dirichlet(args.alpha_vector, 5 * (2 ** (args.n_objectives - 2))).astype(np.float32)
        if args.criterion == 'TB':
generator = TBGFlowNet(args, bpath)
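The `l2r` method in the RolloutWorker context snippet above maps a raw scalarized reward to the training reward via normalization and exponentiation, optionally ramping the exponent over time. A minimal functional sketch using the defaults from `arg_parse` (reward_norm=1, reward_exp=8, reward_exp_ramping=0); the sample inputs are illustrative:

def l2r(raw_reward, reward_norm=1.0, reward_exp=8.0, reward_exp_ramping=0.0, t=0):
    # Functional version of RolloutWorker.l2r.
    if reward_exp_ramping > 0:
        # exponent anneals from 1 at t=0 toward reward_exp as t grows
        exp = 1 + (reward_exp - 1) * (1 - 1 / (1 + t / reward_exp_ramping))
    else:
        exp = reward_exp
    return (raw_reward / reward_norm) ** exp

print(l2r(0.5))                                  # 0.5 ** 8 = 0.00390625
print(l2r(0.5, reward_exp_ramping=100, t=100))   # milder exponent early in training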
6
2023-10-24 14:10:35+00:00
24k
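`sample_batch` in the row above scores the picked molecules with botorch's `Hypervolume`, using the origin as the reference point and passing the score matrix directly (dominated rows contribute no extra volume). A minimal sketch of that metric, assuming botorch is installed; the score matrix is illustrative:

import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

# Illustrative scores for four molecules (rows) on three objectives (columns).
Y = torch.tensor([[0.8, 0.2, 0.5],
                  [0.4, 0.7, 0.6],
                  [0.6, 0.6, 0.3],
                  [0.2, 0.3, 0.9]])
hv = Hypervolume(ref_point=torch.zeros(Y.shape[-1]))  # origin reference, as in sample_batch
print(hv.compute(Y))  # volume dominated between ref_point and the points in Y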
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_unet_dec.py
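The `Upsample3DLayer` snippet in the context list below combines nearest-neighbour interpolation with a 3x3 convolution; when the temporal axis is kept fixed, frames are folded into the batch and upsampled in 2D. A minimal sketch of that spatial path, assuming torch; the layer name and shapes here are illustrative, not the repository's own module:

import torch
import torch.nn as nn

class UpsampleConv2D(nn.Module):
    # Nearest-neighbour spatial upsampling followed by a 3x3 conv,
    # mirroring the temporal_upsample=False path of Upsample3DLayer.
    def __init__(self, dim, out_dim, target_hw, kernel_size=3):
        super().__init__()
        self.up = nn.Upsample(size=target_hw, mode='nearest')
        self.conv = nn.Conv2d(dim, out_dim, kernel_size, padding=kernel_size // 2)

    def forward(self, x):  # x: (B*T, C, H, W)
        return self.conv(self.up(x))

x = torch.randn(2, 64, 16, 16)                    # e.g. B*T=2 frames, C=64
print(UpsampleConv2D(64, 32, (32, 32))(x).shape)  # torch.Size([2, 32, 32, 32])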
[ { "identifier": "Upsample3DLayer", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class Upsample3DLayer(nn.Module):\n \"\"\"Upsampling based on nn.UpSampling and Conv3x3.\n\n If the temporal dimension remains the same:\n x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim)\n Else:\n x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim)\n\n \"\"\"\n def __init__(self,\n dim,\n out_dim,\n target_size,\n temporal_upsample=False,\n kernel_size=3,\n layout='THWC',\n conv_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n dim\n out_dim\n target_size\n Size of the output tensor. Will be a tuple/list that contains T_new, H_new, W_new\n temporal_upsample\n Whether the temporal axis will go through upsampling.\n kernel_size\n The kernel size of the Conv2D layer\n layout\n The layout of the inputs\n \"\"\"\n super(Upsample3DLayer, self).__init__()\n self.conv_init_mode = conv_init_mode\n self.target_size = target_size\n self.out_dim = out_dim\n self.temporal_upsample = temporal_upsample\n if temporal_upsample:\n self.up = nn.Upsample(size=target_size, mode='nearest') # 3D upsampling\n else:\n self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode='nearest') # 2D upsampling\n self.conv = nn.Conv2d(in_channels=dim, out_channels=out_dim, kernel_size=(kernel_size, kernel_size),\n padding=(kernel_size // 2, kernel_size // 2))\n assert layout in ['THWC', 'CTHW']\n self.layout = layout\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C) or (B, C, T, H, W)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_out, C_out) or (B, C, T, H_out, W_out)\n \"\"\"\n if self.layout == 'THWC':\n B, T, H, W, C = x.shape\n if self.temporal_upsample:\n x = x.permute(0, 4, 1, 2, 3) # (B, C, T, H, W)\n return self.conv(self.up(x)).permute(0, 2, 3, 4, 1)\n else:\n assert self.target_size[0] == T\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2) # (B * T, C, H, W)\n x = self.up(x)\n return self.conv(x).permute(0, 2, 3, 1).reshape((B,) + self.target_size + (self.out_dim,))\n elif self.layout == 'CTHW':\n B, C, T, H, W = x.shape\n if self.temporal_upsample:\n return self.conv(self.up(x))\n else:\n assert self.output_size[0] == T\n x = x.permute(0, 2, 1, 3, 4) # (B, T, C, H, W)\n x = x.reshape(B * T, C, H, W)\n return self.conv(self.up(x)).reshape(B, self.target_size[0], self.out_dim, self.target_size[1],\n self.target_size[2]).permute(0, 2, 1, 3, 4)" }, { "identifier": "PatchMerging3D", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class PatchMerging3D(nn.Module):\n \"\"\" Patch Merging Layer\"\"\"\n def __init__(self,\n dim,\n out_dim=None,\n downsample=(1, 2, 2),\n norm_layer='layer_norm',\n padding_type='nearest',\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n dim\n Number of input channels.\n downsample\n downsample factor\n norm_layer\n The normalization layer\n \"\"\"\n super().__init__()\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n self.dim = dim\n if out_dim is None:\n out_dim = max(downsample) * dim\n self.out_dim = out_dim\n self.downsample = downsample\n self.padding_type = padding_type\n self.reduction = nn.Linear(downsample[0] * downsample[1] * downsample[2] * dim,\n out_dim, bias=False)\n self.norm = get_norm_layer(norm_layer, 
in_channels=downsample[0] * downsample[1] * downsample[2] * dim)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def get_out_shape(self, data_shape):\n T, H, W, C_in = data_shape\n pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]\n pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]\n pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]\n return (T + pad_t) // self.downsample[0], (H + pad_h) // self.downsample[1], (W + pad_w) // self.downsample[2],\\\n self.out_dim\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Input feature, tensor size (B, T, H, W, C).\n\n Returns\n -------\n out\n Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)\n \"\"\"\n B, T, H, W, C = x.shape\n\n # padding\n pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]\n pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]\n pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]\n if pad_h or pad_h or pad_w:\n T += pad_t\n H += pad_h\n W += pad_w\n x = _generalize_padding(x, pad_t, pad_w, pad_h, padding_type=self.padding_type)\n\n x = x.reshape((B,\n T // self.downsample[0], self.downsample[0],\n H // self.downsample[1], self.downsample[1],\n W // self.downsample[2], self.downsample[2], C)) \\\n .permute(0, 1, 3, 5, 2, 4, 6, 7) \\\n .reshape(B, T // self.downsample[0], H // self.downsample[1], W // self.downsample[2],\n self.downsample[0] * self.downsample[1] * self.downsample[2] * C)\n x = self.norm(x)\n x = self.reduction(x)\n\n return x" }, { "identifier": "PosEmbed", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class PosEmbed(nn.Module):\n\n def __init__(self, embed_dim, maxT, maxH, maxW, typ='t+h+w'):\n r\"\"\"\n Parameters\n ----------\n embed_dim\n maxT\n maxH\n maxW\n typ\n The type of the positional embedding.\n - t+h+w:\n Embed the spatial position to embeddings\n - t+hw:\n Embed the spatial position to embeddings\n \"\"\"\n super(PosEmbed, self).__init__()\n self.typ = typ\n\n assert self.typ in ['t+h+w', 't+hw']\n self.maxT = maxT\n self.maxH = maxH\n self.maxW = maxW\n self.embed_dim = embed_dim\n # spatiotemporal learned positional embedding\n if self.typ == 't+h+w':\n self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)\n self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)\n self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)\n\n # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.H_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.W_embed.weight, std=0.02)\n elif self.typ == 't+hw':\n self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)\n self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)\n # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)\n else:\n raise NotImplementedError\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m, embed_mode=\"0\")\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Return the x + positional embeddings\n \"\"\"\n _, T, H, W, _ = x.shape\n t_idx = torch.arange(T, device=x.device) 
# (T, C)\n h_idx = torch.arange(H, device=x.device) # (H, C)\n w_idx = torch.arange(W, device=x.device) # (W, C)\n if self.typ == 't+h+w':\n return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\\\n + self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\\\n + self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)\n elif self.typ == 't+hw':\n spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx\n return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)\n else:\n raise NotImplementedError" }, { "identifier": "InitialEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class InitialEncoder(nn.Module):\n def __init__(self,\n dim,\n out_dim,\n downsample_scale: Union[int, Sequence[int]],\n num_conv_layers=2,\n activation='leaky',\n padding_type='nearest',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(InitialEncoder, self).__init__()\n\n self.num_conv_layers = num_conv_layers\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n conv_block = []\n for i in range(num_conv_layers):\n if i == 0:\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(16, out_dim))\n conv_block.append(get_activation(activation))\n else:\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=out_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(16, out_dim))\n conv_block.append(get_activation(activation))\n\n self.conv_block = nn.Sequential(*conv_block)\n if isinstance(downsample_scale, int):\n patch_merge_downsample = (1, downsample_scale, downsample_scale)\n elif len(downsample_scale) == 2:\n patch_merge_downsample = (1, *downsample_scale)\n elif len(downsample_scale) == 3:\n patch_merge_downsample = tuple(downsample_scale)\n else:\n raise NotImplementedError(f\"downsample_scale {downsample_scale} format not supported!\")\n self.patch_merge = PatchMerging3D(\n dim=out_dim, out_dim=out_dim,\n padding_type=padding_type,\n downsample=patch_merge_downsample,\n linear_init_mode=linear_init_mode,\n norm_init_mode=norm_init_mode)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n x --> [K x Conv2D] --> PatchMerge\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C_out)\n \"\"\"\n B, T, H, W, C = x.shape\n if self.num_conv_layers > 0:\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = self.conv_block(x).permute(0, 2, 3, 1) # (B * T, H, W, C_new)\n x = self.patch_merge(x.reshape(B, T, H, W, -1))\n else:\n x = self.patch_merge(x)\n return x" }, { "identifier": "FinalDecoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class FinalDecoder(nn.Module):\n\n def __init__(self,\n target_thw,\n dim,\n num_conv_layers=2,\n activation='leaky',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(FinalDecoder, self).__init__()\n self.target_thw = target_thw\n self.dim = dim\n self.num_conv_layers = num_conv_layers\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n conv_block = []\n for i in 
range(num_conv_layers):\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1), in_channels=dim, out_channels=dim))\n conv_block.append(nn.GroupNorm(16, dim))\n conv_block.append(get_activation(activation))\n self.conv_block = nn.Sequential(*conv_block)\n self.upsample = Upsample3DLayer(\n dim=dim, out_dim=dim,\n target_size=target_thw, kernel_size=3,\n conv_init_mode=conv_init_mode)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n x --> Upsample --> [K x Conv2D]\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C)\n \"\"\"\n x = self.upsample(x)\n if self.num_conv_layers > 0:\n B, T, H, W, C = x.shape\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = self.conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n return x" }, { "identifier": "InitialStackPatchMergingEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class InitialStackPatchMergingEncoder(nn.Module):\n\n def __init__(self,\n num_merge: int,\n in_dim,\n out_dim_list,\n downsample_scale_list,\n num_conv_per_merge_list=None,\n activation='leaky',\n padding_type='nearest',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(InitialStackPatchMergingEncoder, self).__init__()\n\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.num_merge = num_merge\n self.in_dim = in_dim\n self.out_dim_list = out_dim_list[:num_merge]\n self.downsample_scale_list = downsample_scale_list[:num_merge]\n self.num_conv_per_merge_list = num_conv_per_merge_list\n self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list]\n\n self.conv_block_list = nn.ModuleList()\n self.patch_merge_list = nn.ModuleList()\n for i in range(num_merge):\n if i == 0:\n in_dim = in_dim\n else:\n in_dim = self.out_dim_list[i - 1]\n out_dim = self.out_dim_list[i]\n downsample_scale = self.downsample_scale_list[i]\n\n conv_block = []\n for j in range(self.num_conv_per_merge_list[i]):\n if j == 0:\n conv_in_dim = in_dim\n else:\n conv_in_dim = out_dim\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=conv_in_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(self.num_group_list[i], out_dim))\n conv_block.append(get_activation(activation))\n\n conv_block = nn.Sequential(*conv_block)\n self.conv_block_list.append(conv_block)\n patch_merge = PatchMerging3D(\n dim=out_dim, out_dim=out_dim,\n padding_type=padding_type,\n downsample=(1, downsample_scale, downsample_scale),\n linear_init_mode=linear_init_mode,\n norm_init_mode=norm_init_mode)\n self.patch_merge_list.append(patch_merge)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def get_out_shape_list(self, input_shape):\n \"\"\"\n T, H, W, C\n \"\"\"\n out_shape_list = []\n for patch_merge in self.patch_merge_list:\n input_shape = patch_merge.get_out_shape(input_shape)\n out_shape_list.append(input_shape)\n return out_shape_list\n\n def forward(self, x):\n \"\"\"\n\n x --> [K x Conv2D] --> PatchMerge --> ... 
--> [K x Conv2D] --> PatchMerge\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C_out)\n \"\"\"\n for i, (conv_block, patch_merge) in \\\n enumerate(zip(self.conv_block_list, self.patch_merge_list)):\n B, T, H, W, C = x.shape\n if self.num_conv_per_merge_list[i] > 0:\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n x = patch_merge(x)\n return x" }, { "identifier": "FinalStackUpsamplingDecoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class FinalStackUpsamplingDecoder(nn.Module):\n\n def __init__(self,\n target_shape_list,\n in_dim,\n num_conv_per_up_list=None,\n activation='leaky',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n Parameters\n ----------\n target_shape_list:\n list of (T, H ,W ,C)\n \"\"\"\n super(FinalStackUpsamplingDecoder, self).__init__()\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.target_shape_list = target_shape_list\n self.out_dim_list = [target_shape[-1] for target_shape in self.target_shape_list]\n self.num_upsample = len(target_shape_list)\n self.in_dim = in_dim\n self.num_conv_per_up_list = num_conv_per_up_list\n self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list]\n\n self.conv_block_list = nn.ModuleList()\n self.upsample_list = nn.ModuleList()\n for i in range(self.num_upsample):\n if i == 0:\n in_dim = in_dim\n else:\n in_dim = self.out_dim_list[i - 1]\n out_dim = self.out_dim_list[i]\n\n upsample = Upsample3DLayer(\n dim=in_dim, out_dim=in_dim,\n target_size=target_shape_list[i][:-1], kernel_size=3,\n conv_init_mode=conv_init_mode)\n self.upsample_list.append(upsample)\n conv_block = []\n for j in range(num_conv_per_up_list[i]):\n if j == 0:\n conv_in_dim = in_dim\n else:\n conv_in_dim = out_dim\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=conv_in_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(self.num_group_list[i], out_dim))\n conv_block.append(get_activation(activation))\n conv_block = nn.Sequential(*conv_block)\n self.conv_block_list.append(conv_block)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n @staticmethod\n def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False):\n dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [tuple(enc_input_shape), ]\n if large_channel:\n dec_target_shape_list_large_channel = []\n for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]):\n dec_target_shape_large_channel = list(dec_target_shape_list[i])\n dec_target_shape_large_channel[-1] = enc_out_shape[-1]\n dec_target_shape_list_large_channel.append(tuple(dec_target_shape_large_channel))\n dec_target_shape_list = dec_target_shape_list_large_channel\n dec_in_dim = enc_out_shape_list[-1][-1]\n return dec_target_shape_list, dec_in_dim\n\n def forward(self, x):\n \"\"\"\n\n x --> Upsample --> [K x Conv2D] --> ... 
--> Upsample --> [K x Conv2D]\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C)\n \"\"\"\n for i, (conv_block, upsample) in \\\n enumerate(zip(self.conv_block_list, self.upsample_list)):\n x = upsample(x)\n if self.num_conv_per_up_list[i] > 0:\n B, T, H, W, C = x.shape\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n return x" }, { "identifier": "StackCuboidSelfAttentionBlock", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class StackCuboidSelfAttentionBlock(nn.Module):\n \"\"\"\n\n - \"use_inter_ffn\" is True\n x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out\n | ^ | ^\n | | | |\n |-------------| |-------------|\n - \"use_inter_ffn\" is False\n x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out\n | ^ | ^ ^ | ^\n | | | | | | |\n |-------------| |------------| ----------| |-----------|\n If we have enabled global memory vectors, each attention will be a\n\n \"\"\"\n def __init__(self,\n dim,\n num_heads,\n block_cuboid_size=[(4, 4, 4), (4, 4, 4)],\n block_shift_size=[(0, 0, 0), (2, 2, 2)],\n block_strategy=[('d', 'd', 'd'),\n ('l', 'l', 'l')],\n padding_type='ignore',\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=False,\n use_global_vector=False,\n use_global_vector_ffn=True,\n use_global_self_attn=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n checkpoint_level=True,\n use_relative_pos=True,\n use_final_proj=True,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(StackCuboidSelfAttentionBlock, self).__init__()\n # initialization\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n assert len(block_cuboid_size[0]) > 0 and len(block_shift_size) > 0 and len(block_strategy) > 0,\\\n f'Format of the block cuboid size is not correct.' 
\\\n f' block_cuboid_size={block_cuboid_size}'\n assert len(block_cuboid_size) == len(block_shift_size) == len(block_strategy)\n self.num_attn = len(block_cuboid_size)\n self.checkpoint_level = checkpoint_level\n self.use_inter_ffn = use_inter_ffn\n # global vectors\n self.use_global_vector = use_global_vector\n self.use_global_vector_ffn = use_global_vector_ffn\n self.use_global_self_attn = use_global_self_attn\n self.global_dim_ratio = global_dim_ratio\n\n if self.use_inter_ffn:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n if self.use_global_vector_ffn and self.use_global_vector:\n self.global_ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=global_dim_ratio * dim,\n hidden_size=global_dim_ratio * 4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n else:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim, hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn, activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n if self.use_global_vector_ffn and self.use_global_vector:\n self.global_ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=global_dim_ratio * dim,\n hidden_size=global_dim_ratio * 4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn, activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n self.attn_l = nn.ModuleList(\n [CuboidSelfAttentionLayer(\n dim=dim, num_heads=num_heads,\n cuboid_size=ele_cuboid_size,\n shift_size=ele_shift_size,\n strategy=ele_strategy,\n padding_type=padding_type,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n use_global_vector=use_global_vector,\n use_global_self_attn=use_global_self_attn,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n use_final_proj=use_final_proj,\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for ele_cuboid_size, ele_shift_size, ele_strategy\n in zip(block_cuboid_size, block_shift_size, block_strategy)])\n\n def reset_parameters(self):\n for m in self.ffn_l:\n m.reset_parameters()\n if self.use_global_vector_ffn and self.use_global_vector:\n for m in self.global_ffn_l:\n m.reset_parameters()\n for m in self.attn_l:\n m.reset_parameters()\n\n def forward(self, x, global_vectors=None):\n if self.use_inter_ffn:\n if self.use_global_vector:\n for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):\n if self.checkpoint_level >= 2 and self.training:\n x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)\n else:\n x_out, global_vectors_out = attn(x, global_vectors)\n x = x + x_out\n global_vectors = global_vectors + global_vectors_out\n\n if self.checkpoint_level >= 1 and self.training:\n x = 
checkpoint.checkpoint(ffn, x)\n if self.use_global_vector_ffn:\n global_vectors = checkpoint.checkpoint(self.global_ffn_l[idx], global_vectors)\n else:\n x = ffn(x)\n if self.use_global_vector_ffn:\n global_vectors = self.global_ffn_l[idx](global_vectors)\n return x, global_vectors\n else:\n for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x)\n else:\n x = x + attn(x)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(ffn, x)\n else:\n x = ffn(x)\n return x\n else:\n if self.use_global_vector:\n for idx, attn in enumerate(self.attn_l):\n if self.checkpoint_level >= 2 and self.training:\n x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)\n else:\n x_out, global_vectors_out = attn(x, global_vectors)\n x = x + x_out\n global_vectors = global_vectors + global_vectors_out\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n if self.use_global_vector_ffn:\n global_vectors = checkpoint.checkpoint(self.global_ffn_l[0], global_vectors)\n else:\n x = self.ffn_l[0](x)\n if self.use_global_vector_ffn:\n global_vectors = self.global_ffn_l[0](global_vectors)\n return x, global_vectors\n else:\n for idx, attn in enumerate(self.attn_l):\n if self.checkpoint_level >= 2 and self.training:\n out = checkpoint.checkpoint(attn, x)\n else:\n out = attn(x)\n x = x + out\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n else:\n x = self.ffn_l[0](x)\n return x" }, { "identifier": "StackCuboidCrossAttentionBlock", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class StackCuboidCrossAttentionBlock(nn.Module):\n \"\"\"A stack of cuboid cross attention layers.\n\n The advantage of cuboid attention is that we can combine cuboid attention building blocks with different\n hyper-parameters to mimic a broad range of space-time correlation patterns.\n\n - \"use_inter_ffn\" is True\n x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out\n | ^ | ^\n | | | |\n |-------------|----|-------------|\n - \"use_inter_ffn\" is False\n x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem\n | ^ | ^ ^ | ^\n | | | | | | |\n |-------------|----|------------|-- ----------|--|-----------|\n \"\"\"\n def __init__(self,\n dim,\n num_heads,\n block_cuboid_hw=[(4, 4), (4, 4)],\n block_shift_hw=[(0, 0), (2, 2)],\n block_n_temporal=[1, 2],\n block_strategy=[('d', 'd', 'd'),\n ('l', 'l', 'l')],\n padding_type='ignore',\n cross_last_n_frames=None,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=True,\n max_temporal_relative=50,\n checkpoint_level=1,\n use_relative_pos=True,\n # global vectors\n use_global_vector=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(StackCuboidCrossAttentionBlock, self).__init__()\n # initialization\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n assert len(block_cuboid_hw[0]) > 0 and len(block_shift_hw) > 0 and len(block_strategy) > 0,\\\n f'Incorrect format.' 
\\\n f' block_cuboid_hw={block_cuboid_hw}, block_shift_hw={block_shift_hw}, block_strategy={block_strategy}'\n assert len(block_cuboid_hw) == len(block_shift_hw) == len(block_strategy)\n self.num_attn = len(block_cuboid_hw)\n self.checkpoint_level = checkpoint_level\n self.use_inter_ffn = use_inter_ffn\n self.use_global_vector = use_global_vector\n if self.use_inter_ffn:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n else:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n self.attn_l = nn.ModuleList(\n [CuboidCrossAttentionLayer(\n dim=dim,\n num_heads=num_heads,\n cuboid_hw=ele_cuboid_hw,\n shift_hw=ele_shift_hw,\n strategy=ele_strategy,\n n_temporal=ele_n_temporal,\n cross_last_n_frames=cross_last_n_frames,\n padding_type=padding_type,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n max_temporal_relative=max_temporal_relative,\n use_global_vector=use_global_vector,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal\n in zip(block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal)])\n\n def reset_parameters(self):\n for m in self.ffn_l:\n m.reset_parameters()\n for m in self.attn_l:\n m.reset_parameters()\n\n def forward(self, x, mem, mem_global_vector=None):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T_x, H, W, C)\n mem\n Shape (B, T_mem, H, W, C)\n mem_global_vector\n Shape (B, N_global, C)\n\n Returns\n -------\n out\n Shape (B, T_x, H, W, C_out)\n \"\"\"\n if self.use_inter_ffn:\n for attn, ffn in zip(self.attn_l, self.ffn_l):\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)\n else:\n x = x + attn(x, mem, mem_global_vector)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(ffn, x)\n else:\n x = ffn(x)\n return x\n else:\n for attn in self.attn_l:\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)\n else:\n x = x + attn(x, mem, mem_global_vector)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n else:\n x = self.ffn_l[0](x)\n return x" }, { "identifier": "CuboidTransformerEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class CuboidTransformerEncoder(nn.Module):\n \"\"\"Encoder of the CuboidTransformer\n\n x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... 
--> out\n\n \"\"\"\n def __init__(self,\n input_shape,\n base_units=128,\n block_units=None,\n scale_alpha=1.0,\n depth=[4, 4, 4],\n downsample=2,\n downsample_type='patch_merge',\n block_attn_patterns=None,\n block_cuboid_size=[(4, 4, 4),\n (4, 4, 4)],\n block_strategy=[('l', 'l', 'l'),\n ('d', 'd', 'd')],\n block_shift_size=[(0, 0, 0),\n (0, 0, 0)],\n num_heads=4,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation=\"leaky\",\n ffn_activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=True,\n padding_type='ignore',\n checkpoint_level=True,\n use_relative_pos=True,\n self_attn_use_final_proj=True,\n # global vectors\n use_global_vector=False,\n use_global_vector_ffn=True,\n use_global_self_attn=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n conv_init_mode=\"0\",\n down_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n input_shape\n The shape of the input. Contains T, H, W, C\n initial_data_thw\n The shape of the first layer\n base_units\n The number of units\n scale_alpha\n We scale up the channels based on the formula:\n - round_to(base_units * max(downsample_scale) ** units_alpha, 4)\n depth\n The number of layers for each block\n downsample\n The downsample ratio\n downsample_type\n Type of the downsampling layer\n block_attn_patterns\n Attention pattern for the cuboid attention for each block.\n block_cuboid_size\n A list of cuboid size parameters\n block_strategy\n A list of cuboid strategies\n block_shift_size\n A list of shift sizes\n num_global\n The number of global vectors\n num_heads\n The number of heads.\n attn_drop\n proj_drop\n ffn_drop\n gated_ffn\n Whether to enable gated ffn or not\n norm_layer\n The normalization layer\n use_inter_ffn\n Whether to use intermediate FFN\n padding_type\n \"\"\"\n super(CuboidTransformerEncoder, self).__init__()\n # initialization mode\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.conv_init_mode = conv_init_mode\n self.down_linear_init_mode = down_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.input_shape = input_shape\n self.depth = depth\n self.num_blocks = len(depth)\n self.base_units = base_units\n self.scale_alpha = scale_alpha\n if not isinstance(downsample, (tuple, list)):\n downsample = (1, downsample, downsample)\n self.downsample = downsample\n self.downsample_type = downsample_type\n self.num_heads = num_heads\n self.use_global_vector = use_global_vector\n self.checkpoint_level = checkpoint_level\n if block_units is None:\n block_units = [round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)\n for i in range(self.num_blocks)]\n else:\n assert len(block_units) == self.num_blocks and block_units[0] == base_units\n self.block_units = block_units\n\n if self.num_blocks > 1:\n if downsample_type == 'patch_merge':\n self.down_layers = nn.ModuleList(\n [PatchMerging3D(dim=self.block_units[i],\n downsample=downsample,\n # downsample=(1, 1, 1),\n padding_type=padding_type,\n out_dim=self.block_units[i + 1],\n linear_init_mode=down_linear_init_mode,\n norm_init_mode=norm_init_mode)\n for i in range(self.num_blocks - 1)])\n else:\n raise NotImplementedError\n if self.use_global_vector:\n self.down_layer_global_proj = nn.ModuleList(\n [nn.Linear(in_features=global_dim_ratio*self.block_units[i],\n out_features=global_dim_ratio*self.block_units[i + 1])\n for i in 
range(self.num_blocks - 1)])\n\n if block_attn_patterns is not None:\n mem_shapes = self.get_mem_shapes()\n if isinstance(block_attn_patterns, (tuple, list)):\n assert len(block_attn_patterns) == self.num_blocks\n else:\n block_attn_patterns = [block_attn_patterns for _ in range(self.num_blocks)]\n block_cuboid_size = []\n block_strategy = []\n block_shift_size = []\n for idx, key in enumerate(block_attn_patterns):\n func = CuboidSelfAttentionPatterns.get(key)\n cuboid_size, strategy, shift_size = func(mem_shapes[idx])\n block_cuboid_size.append(cuboid_size)\n block_strategy.append(strategy)\n block_shift_size.append(shift_size)\n else:\n if not isinstance(block_cuboid_size[0][0], (list, tuple)):\n block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)]\n else:\n assert len(block_cuboid_size) == self.num_blocks,\\\n f'Incorrect input format! Received block_cuboid_size={block_cuboid_size}'\n\n if not isinstance(block_strategy[0][0], (list, tuple)):\n block_strategy = [block_strategy for _ in range(self.num_blocks)]\n else:\n assert len(block_strategy) == self.num_blocks,\\\n f'Incorrect input format! Received block_strategy={block_strategy}'\n\n if not isinstance(block_shift_size[0][0], (list, tuple)):\n block_shift_size = [block_shift_size for _ in range(self.num_blocks)]\n else:\n assert len(block_shift_size) == self.num_blocks,\\\n f'Incorrect input format! Received block_shift_size={block_shift_size}'\n self.block_cuboid_size = block_cuboid_size\n self.block_strategy = block_strategy\n self.block_shift_size = block_shift_size\n\n self.blocks = nn.ModuleList([nn.Sequential(\n *[StackCuboidSelfAttentionBlock(\n dim=self.block_units[i],\n num_heads=num_heads,\n block_cuboid_size=block_cuboid_size[i],\n block_strategy=block_strategy[i],\n block_shift_size=block_shift_size[i],\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n ffn_drop=ffn_drop,\n activation=ffn_activation,\n gated_ffn=gated_ffn,\n norm_layer=norm_layer,\n use_inter_ffn=use_inter_ffn,\n padding_type=padding_type,\n use_global_vector=use_global_vector,\n use_global_vector_ffn=use_global_vector_ffn,\n use_global_self_attn=use_global_self_attn,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n use_final_proj=self_attn_use_final_proj,\n # initialization\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,\n ) for _ in range(depth[i])])\n for i in range(self.num_blocks)])\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.num_blocks > 1:\n for m in self.down_layers:\n m.reset_parameters()\n if self.use_global_vector:\n apply_initialization(self.down_layer_global_proj,\n linear_mode=self.down_linear_init_mode)\n for ms in self.blocks:\n for m in ms:\n m.reset_parameters()\n\n def get_mem_shapes(self):\n \"\"\"Get the shape of the output memory based on the input shape. 
This can be used for constructing the decoder.\n\n Returns\n -------\n mem_shapes\n A list of shapes of the output memory\n \"\"\"\n\n if self.num_blocks == 1:\n return [self.input_shape]\n else:\n mem_shapes = [self.input_shape]\n curr_shape = self.input_shape\n for down_layer in self.down_layers:\n curr_shape = down_layer.get_out_shape(curr_shape)\n mem_shapes.append(curr_shape)\n return mem_shapes\n\n def forward(self, x, global_vectors=None):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n A list of tensors from the bottom layer to the top layer of the encoder. For example, it can have shape\n - (B, T, H, W, C1)\n - (B, T, H // 2, W // 2, 2 * C1)\n - (B, T, H // 4, W // 4, 4 * C1)\n ...\n global_mem_out\n Optional\n \"\"\"\n B, T, H, W, C_in = x.shape\n assert (T, H, W, C_in) == self.input_shape \n\n if self.use_global_vector:\n out = []\n global_mem_out = []\n for i in range(self.num_blocks):\n for l in self.blocks[i]:\n x, global_vectors = l(x, global_vectors)\n out.append(x)\n global_mem_out.append(global_vectors)\n if self.num_blocks > 1 and i < self.num_blocks - 1:\n x = self.down_layers[i](x)\n global_vectors = self.down_layer_global_proj[i](global_vectors)\n return out, global_mem_out\n else:\n out = []\n for i in range(self.num_blocks):\n x = self.blocks[i](x)\n out.append(x)\n if self.num_blocks > 1 and i < self.num_blocks - 1:\n x = self.down_layers[i](x)\n return out" }, { "identifier": "CuboidSelfAttentionPatterns", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_patterns.py", "snippet": "def full_attention(input_shape):\ndef self_axial(input_shape):\ndef self_video_swin(input_shape, P=2, M=4):\ndef self_divided_space_time(input_shape):\ndef self_spatial_lg_v1(input_shape, M=4):\ndef self_axial_space_dilate_K(input_shape, K=2):\ndef cross_KxK(mem_shape, K):\ndef cross_KxK_lg(mem_shape, K):\ndef cross_KxK_heter(mem_shape, K):\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n P = min(P, T)\n M = min(M, H, W)\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)" }, { "identifier": "get_activation", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def get_activation(act, inplace=False, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n act\n Name of the activation\n inplace\n Whether to perform inplace activation\n\n Returns\n -------\n activation_layer\n The activation\n \"\"\"\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n negative_slope = kwargs.get(\"negative_slope\", 0.1)\n return nn.LeakyReLU(negative_slope, inplace=inplace)\n elif act == 'identity':\n return nn.Identity()\n elif act == 'elu':\n return nn.ELU(inplace=inplace)\n elif act == 'gelu':\n return nn.GELU()\n elif act == 'relu':\n return nn.ReLU()\n elif act == 'sigmoid':\n return nn.Sigmoid()\n elif act == 'tanh':\n return nn.Tanh()\n elif act == 'softrelu' or act == 'softplus':\n return nn.Softplus()\n elif act == 'softsign':\n return nn.Softsign()\n else:\n raise NotImplementedError('act=\"{}\" is not supported. 
'\n 'Try to include it if you can find that in '\n 'https://pytorch.org/docs/stable/nn.html'.format(act))\n else:\n return act" }, { "identifier": "get_norm_layer", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def get_norm_layer(normalization: str = 'layer_norm',\n axis: int = -1,\n epsilon: float = 1e-5,\n in_channels: int = 0, **kwargs):\n \"\"\"Get the normalization layer based on the provided type\n\n Parameters\n ----------\n normalization\n The type of the layer normalization from ['layer_norm']\n axis\n The axis to normalize the\n epsilon\n The epsilon of the normalization layer\n in_channels\n Input channel\n\n Returns\n -------\n norm_layer\n The layer normalization layer\n \"\"\"\n if isinstance(normalization, str):\n if normalization == 'layer_norm':\n assert in_channels > 0\n assert axis == -1\n norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)\n elif normalization == 'rms_norm':\n assert axis == -1\n norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs)\n else:\n raise NotImplementedError('normalization={} is not supported'.format(normalization))\n return norm_layer\n elif normalization is None:\n return nn.Identity()\n else:\n raise NotImplementedError('The type of normalization must be str')" }, { "identifier": "_generalize_padding", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def _generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n pad_t\n pad_h\n pad_w\n padding_type\n t_pad_left\n\n Returns\n -------\n out\n The result after padding the x. Shape will be (B, T + pad_t, H + pad_h, W + pad_w, C)\n \"\"\"\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n assert padding_type in ['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T + pad_t, H + pad_h, W + pad_w)).permute(0, 2, 3, 4, 1)\n else:\n if t_pad_left:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, pad_t, 0))\n else:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))" }, { "identifier": "_generalize_unpadding", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def _generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type):\n assert padding_type in['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T - pad_t, H - pad_h, W - pad_w)).permute(0, 2, 3, 4, 1)\n else:\n return x[:, :(T - pad_t), :(H - pad_h), :(W - pad_w), :].contiguous()" }, { "identifier": "apply_initialization", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def apply_initialization(m,\n linear_mode=\"0\",\n conv_mode=\"0\",\n norm_mode=\"0\",\n embed_mode=\"0\"):\n if isinstance(m, nn.Linear):\n\n if linear_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_in', nonlinearity=\"linear\")\n elif linear_mode in (\"1\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):\n if conv_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise 
NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n if norm_mode in (\"0\", ):\n if m.elementwise_affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n elif isinstance(m, nn.GroupNorm):\n if norm_mode in (\"0\", ):\n if m.affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n # # pos_embed already initialized when created\n elif isinstance(m, nn.Embedding):\n if embed_mode in (\"0\", ):\n nn.init.trunc_normal_(m.weight.data, std=0.02)\n else:\n raise NotImplementedError\n else:\n pass" }, { "identifier": "round_to", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def round_to(dat, c):\n return dat + (dat - dat % c) % c" } ]
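The context list above closes with several tensor utilities. `_generalize_padding` relies on `F.pad`'s convention of consuming padding pairs from the last dimension backwards, which is easy to get wrong for a (B, T, H, W, C) layout. A minimal, self-contained sketch of that convention (tensor sizes are made up):

import torch
import torch.nn.functional as F

x = torch.randn(2, 5, 30, 30, 8)   # (B, T, H, W, C)
pad_t, pad_h, pad_w = 1, 2, 2

# F.pad reads pairs for dims -1, -2, -3, -4 in that order:
# (C_left, C_right, W_left, W_right, H_left, H_right, T_left, T_right)
out = F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))
assert out.shape == (2, 6, 32, 32, 8)   # (B, T+pad_t, H+pad_h, W+pad_w, C)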
from typing import Sequence, Union from functools import lru_cache from collections import OrderedDict from torch import nn from einops import rearrange from .cuboid_transformer import ( Upsample3DLayer, PatchMerging3D, PosEmbed, InitialEncoder, FinalDecoder, InitialStackPatchMergingEncoder, FinalStackUpsamplingDecoder, StackCuboidSelfAttentionBlock, StackCuboidCrossAttentionBlock, CuboidTransformerEncoder) from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns from .utils import ( get_activation, get_norm_layer, _generalize_padding, _generalize_unpadding, apply_initialization, round_to) import warnings import torch import torch.nn.functional as F import torch.utils.checkpoint as checkpoint
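The import statement brings in `torch.utils.checkpoint`, which the attention forwards in the snippets above wrap around each block when `checkpoint_level` allows it during training. A minimal sketch of that residual-plus-checkpoint pattern, with `nn.Linear` standing in for a cuboid attention layer:

import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint

block = nn.Linear(16, 16)                  # stand-in for an attention block
x = torch.randn(4, 16, requires_grad=True)

# Activations inside `block` are recomputed during backward instead of
# being stored, trading compute for memory; the residual add stays outside.
out = x + checkpoint.checkpoint(block, x)
out.sum().backward()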
17519
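Inside `CuboidTransformerEncoder` (quoted in the context list), per-block channel widths default to `round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)`. A sketch with illustrative values; note that the quoted `round_to` returns `dat + (dat - dat % c) % c`, where `(dat - dat % c) % c` is always zero for integers, so a round-up-to-multiple variant is used here for comparison:

def round_to(dat, c):
    # round dat up to the next multiple of c (variant; see note above)
    return dat + (c - dat % c) % c

base_units, scale_alpha, downsample, num_blocks = 128, 1.0, (1, 2, 2), 3
block_units = [round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)
               for i in range(num_blocks)]
print(block_units)   # [128, 256, 512]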
downsample_type=downsample_type, block_attn_patterns=enc_attn_patterns, block_cuboid_size=enc_cuboid_size, block_strategy=enc_cuboid_strategy, block_shift_size=enc_shift_size, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, ffn_activation=ffn_activation, norm_layer=norm_layer, use_inter_ffn=enc_use_inter_ffn, padding_type=padding_type, use_global_vector=num_global_vectors > 0, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, 
initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale):
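`mem_shapes = self.encoder.get_mem_shapes()` above is what ties encoder and decoder together: the decoder is sized against one memory shape per block. A toy sketch of that bookkeeping, assuming a spatial downsample of (1, 2, 2) and channels doubling per block (in the real model the widths come from `block_units`):

def sketch_mem_shapes(input_shape, num_blocks, downsample=(1, 2, 2)):
    t, h, w, c = input_shape                 # (T, H, W, C), finest scale first
    shapes = [(t, h, w, c)]
    for _ in range(num_blocks - 1):
        t, h, w, c = t // downsample[0], h // downsample[1], w // downsample[2], c * 2
        shapes.append((t, h, w, c))
    return shapes

print(sketch_mem_shapes((13, 384, 384, 128), num_blocks=3))
# [(13, 384, 384, 128), (13, 192, 192, 256), (13, 96, 96, 512)]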
"""CuboidTransformer adapted for auxiliary inputs in decoder""" class CuboidTransformerUNetDecoder(nn.Module): """U-Net style Decoder of the CuboidTransformer. For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention We add cross attention following 3 modes: cross_mode == "down": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "up": x --> attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "both": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ ^ ^ | | | | | | | | mem mem mem mem """ def __init__(self, target_temporal_length, mem_shapes, cross_start=0, depth=[2, 2], upsample_type="upsample", upsample_kernel_size=3, block_self_attn_patterns=None, block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], block_self_shift_size=[(1, 1, 1), (0, 0, 0)], block_cross_attn_patterns=None, block_cross_cuboid_hw=[(4, 4), (4, 4)], block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], block_cross_shift_hw=[(0, 0), (0, 0)], block_cross_n_temporal=[1, 2], cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. 
max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList( [PatchMerging3D(dim=self.block_units[i], downsample=downsample, # downsample=(1, 1, 1), padding_type=padding_type, out_dim=self.block_units[i + 1], linear_init_mode=down_linear_init_mode, norm_init_mode=norm_init_mode) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError # Construct upsampling layers if self.upsample_type == "upsample": self.upsample_layers = nn.ModuleList([ Upsample3DLayer( dim=self.mem_shapes[i + 1][-1], out_dim=self.mem_shapes[i][-1], target_size=(target_temporal_length,) + self.mem_shapes[i][1:3], kernel_size=upsample_kernel_size, temporal_upsample=False, conv_init_mode=conv_init_mode, ) for i in range(self.num_blocks - 1)]) else: raise NotImplementedError if self.hierarchical_pos_embed: self.down_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) self.up_hierarchical_pos_embed_l = nn.ModuleList([ PosEmbed(embed_dim=self.block_units[i], typ=pos_embed_type, maxT=self.mem_shapes[i][0], maxH=self.mem_shapes[i][1], maxW=self.mem_shapes[i][2]) for i in range(self.num_blocks - 1)]) if block_self_attn_patterns is not None: if isinstance(block_self_attn_patterns, (tuple, list)): assert len(block_self_attn_patterns) == self.num_blocks else: block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)] block_self_cuboid_size = [] block_self_cuboid_strategy = [] block_self_shift_size = [] for idx, key in enumerate(block_self_attn_patterns): func = CuboidSelfAttentionPatterns.get(key) cuboid_size, strategy, shift_size = func(mem_shapes[idx]) block_self_cuboid_size.append(cuboid_size) block_self_cuboid_strategy.append(strategy) block_self_shift_size.append(shift_size) else: if not isinstance(block_self_cuboid_size[0][0], (list, tuple)): block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_size) == self.num_blocks,\ f'Incorrect input format! 
Received block_self_cuboid_size={block_self_cuboid_size}' if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)): block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_self_cuboid_strategy) == self.num_blocks,\ f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}' if not isinstance(block_self_shift_size[0][0], (list, tuple)): block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)] else: assert len(block_self_shift_size) == self.num_blocks,\ f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}' down_self_blocks = [] up_self_blocks = [] for i in range(self.num_blocks): ele_depth = depth[i] stack_cuboid_blocks =\ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] down_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) stack_cuboid_blocks = \ [StackCuboidSelfAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_size=block_self_cuboid_size[i], block_strategy=block_self_cuboid_strategy[i], block_shift_size=block_self_shift_size[i], attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, padding_type=padding_type, use_global_vector=use_self_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(ele_depth)] up_self_blocks.append(nn.ModuleList(stack_cuboid_blocks)) self.down_self_blocks = nn.ModuleList(down_self_blocks) self.up_self_blocks = nn.ModuleList(up_self_blocks) if block_cross_attn_patterns is not None: if isinstance(block_cross_attn_patterns, (tuple, list)): assert len(block_cross_attn_patterns) == self.num_blocks else: block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)] block_cross_cuboid_hw = [] block_cross_cuboid_strategy = [] block_cross_shift_hw = [] block_cross_n_temporal = [] for idx, key in enumerate(block_cross_attn_patterns): if key == "last_frame_dst": cuboid_hw = None shift_hw = None strategy = None n_temporal = None else: func = CuboidCrossAttentionPatterns.get(key) cuboid_hw, shift_hw, strategy, n_temporal = func(mem_shapes[idx]) block_cross_cuboid_hw.append(cuboid_hw) block_cross_cuboid_strategy.append(strategy) block_cross_shift_hw.append(shift_hw) 
block_cross_n_temporal.append(n_temporal) else: if not isinstance(block_cross_cuboid_hw[0][0], (list, tuple)): block_cross_cuboid_hw = [block_cross_cuboid_hw for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_hw={block_cross_cuboid_hw}' if not isinstance(block_cross_cuboid_strategy[0][0], (list, tuple)): block_cross_cuboid_strategy = [block_cross_cuboid_strategy for _ in range(self.num_blocks)] else: assert len(block_cross_cuboid_strategy) == self.num_blocks, \ f'Incorrect input format! Received block_cross_cuboid_strategy={block_cross_cuboid_strategy}' if not isinstance(block_cross_shift_hw[0][0], (list, tuple)): block_cross_shift_hw = [block_cross_shift_hw for _ in range(self.num_blocks)] else: assert len(block_cross_shift_hw) == self.num_blocks, \ f'Incorrect input format! Received block_cross_shift_hw={block_cross_shift_hw}' if not isinstance(block_cross_n_temporal[0], (list, tuple)): block_cross_n_temporal = [block_cross_n_temporal for _ in range(self.num_blocks)] else: assert len(block_cross_n_temporal) == self.num_blocks, \ f'Incorrect input format! Received block_cross_n_temporal={block_cross_n_temporal}' if self.up_use_cross: self.up_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.up_cross_blocks.append(cross_block) if self.down_use_cross: self.down_cross_blocks = nn.ModuleList() for i in range(self.cross_start, self.num_blocks): cross_block = nn.ModuleList( [StackCuboidCrossAttentionBlock( dim=self.mem_shapes[i][-1], num_heads=num_heads, block_cuboid_hw=block_cross_cuboid_hw[i], block_strategy=block_cross_cuboid_strategy[i], block_shift_hw=block_cross_shift_hw[i], block_n_temporal=block_cross_n_temporal[i], cross_last_n_frames=cross_last_n_frames, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, norm_layer=norm_layer, use_inter_ffn=use_inter_ffn, activation=ffn_activation, max_temporal_relative=max_temporal_relative, padding_type=padding_type, use_global_vector=use_cross_global, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, norm_init_mode=norm_init_mode, ) for _ in range(depth[i])]) self.down_cross_blocks.append(cross_block) self.reset_parameters() def reset_parameters(self): for ms in self.down_self_blocks: for m in ms: m.reset_parameters() for ms in self.up_self_blocks: for m in ms: m.reset_parameters() if 
self.up_use_cross: for ms in self.up_cross_blocks: for m in ms: m.reset_parameters() if self.down_use_cross: for ms in self.down_cross_blocks: for m in ms: m.reset_parameters() if self.num_blocks > 1: for m in self.downsample_layers: m.reset_parameters() for m in self.upsample_layers: m.reset_parameters() if self.hierarchical_pos_embed: for m in self.down_hierarchical_pos_embed_l: m.reset_parameters() for m in self.up_hierarchical_pos_embed_l: m.reset_parameters() def forward(self, x, mem_l, mem_global_vector_l=None): """ Parameters ---------- x Shape (B, T, H, W, C) mem_l A list of memory tensors Returns ------- out """ B, T, H, W, C = x.shape assert T == self.target_temporal_length assert (H, W) == (self.mem_shapes[0][1], self.mem_shapes[0][2]) new_mem_global_vector_l = [] for i in range(self.num_blocks): # Downample if i > 0: x = self.downsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.down_hierarchical_pos_embed_l[i - 1](x) mem_global_vector = None if mem_global_vector_l is None else mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.down_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.down_self_blocks[i][idx](x, mem_global_vector) else: x = self.down_self_blocks[i][idx](x) if self.down_use_cross and i >= self.cross_start: x = self.down_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) new_mem_global_vector_l.append(mem_global_vector) for i in range(self.num_blocks - 1, -1, -1): mem_global_vector = new_mem_global_vector_l[i] for idx in range(self.depth[i]): if self.use_self_global: if self.self_update_global: x, mem_global_vector = self.up_self_blocks[i][idx](x, mem_global_vector) else: x, _ = self.up_self_blocks[i][idx](x, mem_global_vector) else: x = self.up_self_blocks[i][idx](x) if self.up_use_cross and i >= self.cross_start: x = self.up_cross_blocks[i - self.cross_start][idx](x, mem_l[i], mem_global_vector) # Upsample if i > 0: x = self.upsample_layers[i - 1](x) if self.hierarchical_pos_embed: x = self.up_hierarchical_pos_embed_l[i - 1](x) return x class CuboidTransformerAuxModel(nn.Module): """Cuboid Transformer with auxiliary input in decoder for spatiotemporal forecasting We adopt the Non-autoregressive encoder-decoder architecture. The decoder takes the multi-scale memory output from the encoder, as well as auxiliary input. 
The initial downsampling / upsampling layers will be Downsampling: [K x Conv2D --> PatchMerge] Upsampling: [Nearest Interpolation-based Upsample --> K x Conv2D] x -----------> downsample (optional) ---> (+pos_embed) ---> enc ---------> mem_l | | |------------------| | | aux_input ---> downsample (optional) ---> (+pos_embed) ---> enc -> cross_attn -> dec -> upsample (optional) -> y """ def __init__(self, input_shape, target_shape, base_units=128, block_units=None, scale_alpha=1.0, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, # inter-attn downsample/upsample downsample=2, downsample_type='patch_merge', upsample_type="upsample", upsample_kernel_size=3, # encoder enc_depth=[4, 4, 4], enc_attn_patterns=None, enc_cuboid_size=[(4, 4, 4), (4, 4, 4)], enc_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], enc_shift_size=[(0, 0, 0), (0, 0, 0)], enc_use_inter_ffn=True, # decoder dec_depth=[2, 2], dec_cross_start=0, dec_self_attn_patterns=None, dec_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], dec_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], dec_self_shift_size=[(1, 1, 1), (0, 0, 0)], dec_cross_attn_patterns=None, dec_cross_cuboid_hw=[(4, 4), (4, 4)], dec_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], dec_cross_shift_hw=[(0, 0), (0, 0)], dec_cross_n_temporal=[1, 2], dec_cross_last_n_frames=None, dec_use_inter_ffn=True, dec_hierarchical_pos_embed=False, # global vectors num_global_vectors=4, use_dec_self_global=True, dec_self_update_global=True, use_dec_cross_global=True, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # # initial downsample and final upsample initial_downsample_type="conv", initial_downsample_activation="leaky", # initial_downsample_type=="conv" initial_downsample_scale=1, initial_downsample_conv_layers=2, final_upsample_conv_layers=2, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=1, initial_downsample_stack_conv_dim_list=None, initial_downsample_stack_conv_downscale_list=[1, ], initial_downsample_stack_conv_num_conv_list=[2, ], # # end of initial downsample and final upsample ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', padding_type='ignore', pos_embed_type='t+hw', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", down_up_linear_init_mode="0", norm_init_mode="0", # different from CuboidTransformerModel, no arg `dec_use_first_self_attn=False` auxiliary_channels: int = 1, unet_dec_cross_mode="up", ): """ Parameters ---------- input_shape Shape of the input tensor. It will be (T, H, W, C_in) target_shape Shape of the input tensor. It will be (T_out, H, W, C_out) base_units The base units """ super(CuboidTransformerAuxModel, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.down_up_linear_init_mode = down_up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(enc_depth) == len(dec_depth) self.base_units = base_units self.num_global_vectors = num_global_vectors if global_dim_ratio != 1: assert separate_global_qkv == True, \ f"Setting global_dim_ratio != 1 requires separate_global_qkv == True." 
self.global_dim_ratio = global_dim_ratio self.input_shape = input_shape self.target_shape = target_shape T_in, H_in, W_in, C_in = input_shape T_out, H_out, W_out, C_out = target_shape assert H_in == H_out and W_in == W_out self.auxiliary_channels = auxiliary_channels if self.num_global_vectors > 0: self.init_global_vectors = nn.Parameter( torch.zeros((self.num_global_vectors, global_dim_ratio*base_units))) new_input_shape = self.get_initial_encoder_final_decoder( initial_downsample_scale=initial_downsample_scale, initial_downsample_type=initial_downsample_type, activation=initial_downsample_activation, # initial_downsample_type=="conv" initial_downsample_conv_layers=initial_downsample_conv_layers, final_upsample_conv_layers=final_upsample_conv_layers, padding_type=padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers=initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list=initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list=initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list=initial_downsample_stack_conv_num_conv_list, ) T_in, H_in, W_in, _ = new_input_shape self.encoder = CuboidTransformerEncoder( input_shape=(T_in, H_in, W_in, base_units), base_units=base_units, block_units=block_units, scale_alpha=scale_alpha, depth=enc_depth, downsample=downsample, downsample_type=downsample_type, block_attn_patterns=enc_attn_patterns, block_cuboid_size=enc_cuboid_size, block_strategy=enc_cuboid_strategy, block_shift_size=enc_shift_size, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, gated_ffn=gated_ffn, ffn_activation=ffn_activation, norm_layer=norm_layer, use_inter_ffn=enc_use_inter_ffn, padding_type=padding_type, use_global_vector=num_global_vectors > 0, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, down_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, ) self.enc_pos_embed = PosEmbed( embed_dim=base_units, typ=pos_embed_type, maxH=H_in, maxW=W_in, maxT=T_in) mem_shapes = self.encoder.get_mem_shapes() self.dec_pos_embed = PosEmbed( embed_dim=mem_shapes[-1][-1], typ=pos_embed_type, maxT=T_out, maxH=mem_shapes[-1][1], maxW=mem_shapes[-1][2]) self.unet_dec_cross_mode = unet_dec_cross_mode self.decoder = CuboidTransformerUNetDecoder( target_temporal_length=T_out, mem_shapes=mem_shapes, cross_start=dec_cross_start, depth=dec_depth, upsample_type=upsample_type, block_self_attn_patterns=dec_self_attn_patterns, block_self_cuboid_size=dec_self_cuboid_size, block_self_shift_size=dec_self_shift_size, block_self_cuboid_strategy=dec_self_cuboid_strategy, block_cross_attn_patterns=dec_cross_attn_patterns, block_cross_cuboid_hw=dec_cross_cuboid_hw, block_cross_shift_hw=dec_cross_shift_hw, block_cross_cuboid_strategy=dec_cross_cuboid_strategy, block_cross_n_temporal=dec_cross_n_temporal, cross_last_n_frames=dec_cross_last_n_frames, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop, ffn_drop=ffn_drop, upsample_kernel_size=upsample_kernel_size, ffn_activation=ffn_activation, gated_ffn=gated_ffn, norm_layer=norm_layer, 
use_inter_ffn=dec_use_inter_ffn, max_temporal_relative=T_in + T_out, padding_type=padding_type, hierarchical_pos_embed=dec_hierarchical_pos_embed, pos_embed_type=pos_embed_type, use_self_global=(num_global_vectors > 0) and use_dec_self_global, self_update_global=dec_self_update_global, use_cross_global=(num_global_vectors > 0) and use_dec_cross_global, use_global_vector_ffn=use_global_vector_ffn, use_global_self_attn=use_global_self_attn, separate_global_qkv=separate_global_qkv, global_dim_ratio=global_dim_ratio, checkpoint_level=checkpoint_level, use_relative_pos=use_relative_pos, self_attn_use_final_proj=self_attn_use_final_proj, # initialization attn_linear_init_mode=attn_linear_init_mode, ffn_linear_init_mode=ffn_linear_init_mode, conv_init_mode=conv_init_mode, up_linear_init_mode=down_up_linear_init_mode, norm_init_mode=norm_init_mode, # different from CuboidTransformerDecoder downsample=downsample, downsample_type=downsample_type, cross_mode=unet_dec_cross_mode, down_linear_init_mode=down_up_linear_init_mode, ) self.reset_parameters() def get_initial_encoder_final_decoder( self, initial_downsample_type, activation, # initial_downsample_type=="conv" initial_downsample_scale, initial_downsample_conv_layers, final_upsample_conv_layers, padding_type, # initial_downsample_type == "stack_conv" initial_downsample_stack_conv_num_layers, initial_downsample_stack_conv_dim_list, initial_downsample_stack_conv_downscale_list, initial_downsample_stack_conv_num_conv_list, ): T_in, H_in, W_in, C_in = self.input_shape T_out, H_out, W_out, C_out = self.target_shape # Construct the initial upsampling / downsampling layers self.initial_downsample_type = initial_downsample_type if self.initial_downsample_type == "conv": if isinstance(initial_downsample_scale, int): initial_downsample_scale = (1, initial_downsample_scale, initial_downsample_scale) elif len(initial_downsample_scale) == 2: initial_downsample_scale = (1, *initial_downsample_scale) elif len(initial_downsample_scale) == 3: initial_downsample_scale = tuple(initial_downsample_scale) else: raise NotImplementedError(f"initial_downsample_scale {initial_downsample_scale} format not supported!") # if any(ele > 1 for ele in initial_downsample_scale):
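The decoder's forward pass above walks the scales down and then back up, cross-attending to the encoder memory `mem_l[i]` at every visited block when cross_mode="both" and cross_start=0. A runnable trace of the visit order, simplified to depth[i]=1 per block:

num_blocks, trace = 3, []
for i in range(num_blocks):                  # downsampling phase
    if i > 0:
        trace.append(f"down[{i - 1}]")
    trace.append(f"self+cross@{i}")
for i in range(num_blocks - 1, -1, -1):      # upsampling phase
    trace.append(f"self+cross@{i}")
    if i > 0:
        trace.append(f"up[{i - 1}]")
print(" -> ".join(trace))
# self+cross@0 -> down[0] -> self+cross@1 -> down[1] -> self+cross@2
# -> self+cross@2 -> up[1] -> self+cross@1 -> up[0] -> self+cross@0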
self.initial_encoder = InitialEncoder(dim=C_in,
3
2023-10-23 11:45:50+00:00
24k
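That closes the first record. A record with this layout is typically consumed by giving the model `cropped_code` as the prompt and scoring its prediction against `next_line`; `gold_snippet_index` marks which context snippet contains the needed definition. A sketch with the values from the record above (the long field is abbreviated):

record = {
    "cropped_code": "...",     # truncated here; the full field is shown above
    "next_line": "self.initial_encoder = InitialEncoder(dim=C_in,",
    "gold_snippet_index": 3,
}
prediction = "self.initial_encoder = InitialEncoder(dim=C_in,"  # model output
print(prediction.strip() == record["next_line"].strip())        # True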
IBM/VillanDiffusion
loss.py
[ { "identifier": "Backdoor", "path": "dataset.py", "snippet": "class Backdoor():\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n \n GREY_BG_RATIO = 0.3\n \n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n # STOP_SIGN_IMG = \"static/stop_sign_bg_blk.jpg\"\n CAT_IMG = \"static/cat_wo_bg.png\"\n GLASSES_IMG = \"static/glasses.png\"\n \n TARGET_FA = \"SHOE\"\n TARGET_TG = \"NOSHIFT\"\n TARGET_BOX = \"CORNER\"\n # TARGET_BOX_MED = \"BOX_MED\"\n TARGET_SHIFT = \"SHIFT\"\n TARGET_HAT = \"BWHAT\"\n TARGET_FEDORA_HAT = \"HAT\"\n TARGET_CAT = \"CAT\"\n \n TRIGGER_GAP_X = TRIGGER_GAP_Y = 2\n \n TRIGGER_NONE = \"NONE\"\n TRIGGER_FA = \"FASHION\"\n TRIGGER_FA_EZ = \"FASHION_EZ\"\n TRIGGER_MNIST = \"MNIST\"\n TRIGGER_MNIST_EZ = \"MNIST_EZ\"\n TRIGGER_SM_BOX = \"SM_BOX\"\n TRIGGER_XSM_BOX = \"XSM_BOX\"\n TRIGGER_XXSM_BOX = \"XXSM_BOX\"\n TRIGGER_XXXSM_BOX = \"XXXSM_BOX\"\n TRIGGER_BIG_BOX = \"BIG_BOX\"\n TRIGGER_BIG_BOX_MED = \"BOX_18\"\n TRIGGER_SM_BOX_MED = \"BOX_14\"\n TRIGGER_XSM_BOX_MED = \"BOX_11\"\n TRIGGER_XXSM_BOX_MED = \"BOX_8\"\n TRIGGER_XXXSM_BOX_MED = \"BOX_4\"\n TRIGGER_GLASSES = \"GLASSES\"\n TRIGGER_BIG_STOP_SIGN = \"STOP_SIGN_18\"\n TRIGGER_SM_STOP_SIGN = \"STOP_SIGN_14\"\n TRIGGER_XSM_STOP_SIGN = \"STOP_SIGN_11\"\n TRIGGER_XXSM_STOP_SIGN = \"STOP_SIGN_8\"\n TRIGGER_XXXSM_STOP_SIGN = \"STOP_SIGN_4\"\n \n # GREY_NORM_MIN = 0\n # GREY_NORM_MAX = 1\n \n def __init__(self, root: str):\n self.__root = root\n \n def __get_transform(self, channel: int, image_size: Union[int, Tuple[int]], vmin: Union[float, int], vmax: Union[float, int], prev_trans: List=[], next_trans: List=[]):\n if channel == 1:\n channel_trans = transforms.Grayscale(num_output_channels=1)\n elif channel == 3:\n channel_trans = transforms.Lambda(lambda x: x.convert(\"RGB\"))\n \n trans = [channel_trans,\n transforms.Resize(image_size), \n transforms.ToTensor(),\n # transforms.Lambda(lambda x: normalize(vmin_out=vmin, vmax_out=vmax, x=x)),\n transforms.Lambda(lambda x: normalize(vmin_in=0.0, vmax_in=1.0, vmin_out=vmin, vmax_out=vmax, x=x)),\n # transforms.Lambda(lambda x: x * 2 - 1),\n ]\n return Compose(prev_trans + trans + next_trans)\n \n @staticmethod\n def __read_img(path: Union[str, os.PathLike]):\n return Image.open(path)\n @staticmethod\n def __bg2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = (vmax - vmin) * Backdoor.GREY_BG_RATIO + vmin\n trig[trig <= thres] = thres\n return trig\n @staticmethod\n def __bg2black(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = (vmax - vmin) * Backdoor.GREY_BG_RATIO + vmin\n trig[trig <= thres] = vmin\n return trig\n @staticmethod\n def __white2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = vmax - (vmax - vmin) * Backdoor.GREY_BG_RATIO\n trig[trig >= thres] = thres\n return trig\n @staticmethod\n def __white2med(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = vmax - (vmax - vmin) * Backdoor.GREY_BG_RATIO\n trig[trig >= 0.7] = (vmax - vmin) / 2\n return trig\n \n def __get_img_target(self, path: Union[str, os.PathLike], image_size: int, channel: int, vmin: Union[float, int], vmax: Union[float, int]):\n img = Backdoor.__read_img(path)\n trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n return Backdoor.__bg2grey(trig=trig, vmin=vmin, vmax=vmax)\n \n def __get_img_trigger(self, path: Union[str, os.PathLike], image_size: int, channel: int, trigger_sz: int, vmin: Union[float, int], vmax: Union[float, int], x: int=None, y: int=None):\n # Padding of Left & Top\n 
l_pad = t_pad = int((image_size - trigger_sz) / 2)\n r_pad = image_size - trigger_sz - l_pad\n b_pad = image_size - trigger_sz - t_pad\n residual = image_size - trigger_sz\n if x != None:\n if x > 0:\n l_pad = x\n r_pad = residual - l_pad\n else:\n r_pad = -x\n l_pad = residual - r_pad\n if y != None:\n if y > 0:\n t_pad = y\n b_pad = residual - t_pad\n else:\n b_pad = -y\n t_pad = residual - b_pad\n \n img = Backdoor.__read_img(path)\n next_trans = [transforms.Pad(padding=[l_pad, t_pad, r_pad, b_pad], fill=vmin)]\n trig = self.__get_transform(channel=channel, image_size=trigger_sz, vmin=vmin, vmax=vmax, next_trans=next_trans)(img)\n # thres = (vmax - vmin) * 0.3 + vmin\n # trig[trig <= thres] = vmin\n trig[trig >= 0.999] = vmin\n # print(f\"trigger shape: {trig.shape}\")\n return trig\n @staticmethod\n def __roll(x: torch.Tensor, dx: int, dy: int):\n shift = tuple([0] * len(x.shape[:-2]) + [dy] + [dx])\n dim = tuple([i for i in range(len(x.shape))])\n return torch.roll(x, shifts=shift, dims=dim)\n @staticmethod\n def __get_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int], val: Union[float, int]):\n if isinstance(image_size, int):\n img_shape = (image_size, image_size)\n elif isinstance(image_size, list):\n img_shape = image_size\n else:\n raise TypeError(f\"Argument image_size should be either an integer or a list\")\n trig = torch.full(size=(channel, *img_shape), fill_value=vmin)\n trig[:, b1[0]:b2[0], b1[1]:b2[1]] = val\n return trig\n @staticmethod\n def __get_white_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n return Backdoor.__get_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, val=vmax)\n @staticmethod\n def __get_grey_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n return Backdoor.__get_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, val=(vmin + vmax) / 2)\n @staticmethod\n def __get_trig_box_coord(x: int, y: int):\n if x < 0 or y < 0:\n raise ValueError(f\"Argument x, y should > 0\")\n return (- (y + Backdoor.TRIGGER_GAP_Y), - (x + Backdoor.TRIGGER_GAP_X)), (- Backdoor.TRIGGER_GAP_Y, - Backdoor.TRIGGER_GAP_X)\n \n def get_trigger(self, type: str, channel: int, image_size: int, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n if type == Backdoor.TRIGGER_FA:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[0][0], vmin=vmin, vmax=vmax), dx=0, dy=2)\n elif type == Backdoor.TRIGGER_FA_EZ:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 135, 144\n # return ds[144][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[144][0], vmin=vmin, vmax=vmax), dx=0, dy=4)\n elif type == Backdoor.TRIGGER_MNIST:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = MNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 3, 6, 8\n # return ds[3][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[3][0], vmin=vmin, 
vmax=vmax), dx=10, dy=3)\n elif type == Backdoor.TRIGGER_MNIST_EZ:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = MNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 3, 6, 8\n # return ds[6][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[6][0], vmin=vmin, vmax=vmax), dx=10, dy=3)\n elif type == Backdoor.TRIGGER_SM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(14, 14)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(11, 11)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(8, 8)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXXSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(4, 4)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(18, 18)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_BOX_MED:\n b1, b2 = Backdoor.__get_trig_box_coord(18, 18)\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_SM_BOX_MED:\n b1, b2 = Backdoor.__get_trig_box_coord(14, 14)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(11, 11)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(8, 8)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXXSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(4, 4)\n # trig = torch.full(size=(channel, image_size, 
image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_GLASSES:\n trigger_sz = int(image_size * 0.625)\n return self.__get_img_trigger(path=Backdoor.GLASSES_IMG, image_size=image_size, channel=channel, trigger_sz=trigger_sz, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=18, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_SM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=14, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=11, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XXSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=8, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XXXSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=4, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_NONE: \n # trig = torch.zeros(channel, image_size, image_size)\n trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n return trig\n else:\n raise ValueError(f\"Trigger type {type} isn't found\")\n \n def __check_channel(self, sample: torch.Tensor, channel_first: bool=None) -> int:\n if channel_first != None:\n # If user specified the localation of the channel\n if self.__channel_first:\n if sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3:\n return Backdoor.CHANNEL_FIRST\n elif sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3:\n return Backdoor.CHANNEL_LAST\n warnings.warn(Log.warning(\"The specified Channel doesn't exist, determine channel automatically\"))\n print(Log.warning(\"The specified Channel doesn't exist, determine channel automatically\"))\n \n # If user doesn't specified the localation of the channel or the \n if (sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3) and \\\n (sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3):\n raise ValueError(f\"Duplicate channel found, found {sample.shape[Backdoor.CHANNEL_LAST]} at dimension 2 and {sample.shape[Backdoor.CHANNEL_FIRST]} at dimension 0\")\n\n if sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3:\n return Backdoor.CHANNEL_LAST\n elif sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3:\n return Backdoor.CHANNEL_FIRST\n else:\n raise ValueError(f\"Invalid channel shape, found {sample.shape[Backdoor.CHANNEL_LAST]} at dimension 2 and {sample.shape[Backdoor.CHANNEL_FIRST]} at dimension 0\")\n \n def __check_image_size(self, sample: torch.Tensor, channel_loc: int):\n image_size = list(sample.shape)[-3:]\n del image_size[channel_loc]\n return image_size\n \n def get_target(self, type: str, trigger: torch.tensor=None, dx: int=-5, dy: int=-3, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n channel_loc = 
self.__check_channel(sample=trigger, channel_first=None)\n channel = trigger.shape[channel_loc]\n image_size = self.__check_image_size(sample=trigger, channel_loc=channel_loc)\n print(f\"image size: {image_size}\")\n if type == Backdoor.TARGET_TG:\n if trigger == None:\n raise ValueError(\"trigger shouldn't be none\")\n return Backdoor.__bg2grey(trigger.clone().detach(), vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_SHIFT:\n if trigger == None:\n raise ValueError(\"trigger shouldn't be none\")\n # t_trig = trigger.clone().detach()\n # shift = tuple([0] * len(t_trig.shape[:-2]) + [dy] + [dx])\n # dim = tuple([i for i in range(len(t_trig.shape))])\n # # print(f\"Shift: {shift} | t_trig: {t_trig.shape}\")\n # return torch.roll(t_trig, shifts=shift, dims=dim)\n return Backdoor.__bg2grey(Backdoor.__roll(trigger.clone().detach(), dx=dx, dy=dy), vmin=vmin, vmax=vmax)\n # elif type == Backdoor.TARGET_BOX:\n # # z = torch.full_like(trigger, fill_value=vmin)\n # # z[:, 0:10, 0:10] = vmax\n # # return z\n # b1 = (None, None)\n # b2 = (10, 10)\n # return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_BOX:\n b1 = (None, None)\n b2 = (10, 10)\n return Backdoor.__bg2grey(trig=Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax), vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_FA:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n # return ds[0][0]\n return Backdoor.__bg2grey(trig=ds[0][0], vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_HAT:\n # img = Backdoor.__read_img(\"static/hat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=\"static/hat.png\", channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_FEDORA_HAT:\n # img = Backdoor.__read_img(\"static/fedora-hat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=\"static/fedora-hat.png\", channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_CAT:\n # img = Backdoor.__read_img(\"static/cat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=Backdoor.CAT_IMG, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n else:\n raise NotImplementedError(f\"Target type {type} isn't found\")\n \n def show_image(self, img: torch.Tensor):\n plt.axis('off') \n plt.tight_layout()\n plt.imshow(img.permute(1, 2, 0).squeeze(), cmap='gray')\n plt.show()" }, { "identifier": "DEFAULT_VMIN", "path": "dataset.py", "snippet": "DEFAULT_VMIN = float(-1.0)" }, { "identifier": "DEFAULT_VMAX", "path": "dataset.py", "snippet": "DEFAULT_VMAX = float(1.0)" }, { "identifier": "DiffuserModelSched", "path": "model.py", "snippet": "class DiffuserModelSched():\n LR_SCHED_CKPT: str = \"lr_sched.pth\"\n OPTIM_CKPT: str = \"optim.pth\"\n \n SDE_VP: str = \"SDE-VP\"\n SDE_VE: str = \"SDE-VE\"\n SDE_LDM: str = \"SDE-LDM\"\n CLIP_SAMPLE_DEFAULT = False\n MODEL_DEFAULT: str = \"DEFAULT\"\n DDPM_32_DEFAULT: str = \"DDPM-32-DEFAULT\"\n DDPM_256_DEFAULT: str = \"DDPM-256-DEFAULT\"\n NCSNPP_32_DEFAULT: str = 
\"NCSNPP-32-DEFAULT\"\n NCSNPP_256_DEFAULT: str = \"NCSNPP-256-DEFAULT\"\n DDPM_CIFAR10_DEFAULT: str = \"DDPM-CIFAR10-DEFAULT\"\n DDPM_CELEBA_HQ_DEFAULT: str = \"DDPM-CELEBA-HQ-DEFAULT\"\n DDPM_CHURCH_DEFAULT: str = \"DDPM-CHURCH-DEFAULT\"\n DDPM_BEDROOM_DEFAULT: str = \"DDPM-BEDROOM-DEFAULT\"\n LDM_CELEBA_HQ_DEFAULT: str = \"LDM-CELEBA-HQ-DEFAULT\"\n NCSNPP_CIFAR10_DEFAULT: str = \"NCSNPP-CIFAR10-DEFAULT\"\n NCSNPP_CELEBA_HQ_DEFAULT: str = \"NCSNPP-CELEBA-HQ-DEFAULT\"\n NCSNPP_CHURCH_DEFAULT: str = \"NCSNPP-CHURCH-DEFAULT\"\n \n DDPM_CIFAR10_32 = \"DDPM-CIFAR10-32\"\n DDPM_CELEBA_HQ_256 = \"DDPM-CELEBA-HQ-256\"\n DDPM_CHURCH_256 = \"DDPM-CHURCH-256\"\n DDPM_BEDROOM_256 = \"DDPM-BEDROOM-256\"\n LDM_CELEBA_HQ_256 = \"LDM-CELEBA-HQ-256\"\n NCSNPP_CIFAR10_32 = \"NCSNPP-CIFAR10-32\"\n NCSNPP_CELEBA_HQ_256 = \"NCSNPP-CELEBA-HQ-256\"\n NCSNPP_CHURCH_256 = \"NCSNPP-CHURCH-256\"\n\n DDPM_SCHED = \"DDPM-SCHED\"\n DDIM_SCHED = \"DDIM-SCHED\"\n DPM_SOLVER_PP_O1_SCHED = \"DPM_SOLVER_PP_O1-SCHED\"\n DPM_SOLVER_O1_SCHED = \"DPM_SOLVER_O1-SCHED\"\n DPM_SOLVER_PP_O2_SCHED = \"DPM_SOLVER_PP_O2-SCHED\"\n DPM_SOLVER_O2_SCHED = \"DPM_SOLVER_O2-SCHED\"\n DPM_SOLVER_PP_O3_SCHED = \"DPM_SOLVER_PP_O3-SCHED\"\n DPM_SOLVER_O3_SCHED = \"DPM_SOLVER_O3-SCHED\"\n UNIPC_SCHED = \"UNIPC-SCHED\"\n PNDM_SCHED = \"PNDM-SCHED\"\n DEIS_SCHED = \"DEIS-SCHED\"\n HEUN_SCHED = \"HEUN-SCHED\"\n LMSD_SCHED = \"LMSD-SCHED\"\n LDM_SCHED = \"LDM-SCHED\"\n SCORE_SDE_VE_SCHED = \"SCORE-SDE-VE-SCHED\"\n EDM_VE_SCHED = \"EDM-VE-SCHED\"\n EDM_VE_ODE_SCHED = \"EDM-VE-ODE-SCHED\"\n EDM_VE_SDE_SCHED = \"EDM-VE-SDE-SCHED\"\n \n @staticmethod\n def get_sample_clip(clip_sample: bool, clip_sample_default: bool):\n if clip_sample is not None:\n return clip_sample\n return clip_sample_default\n @staticmethod\n def __get_pipeline_generator(unet, scheduler, pipeline):\n def get_pipeline(unet, scheduler):\n return pipeline(unet, scheduler)\n return get_pipeline\n @staticmethod\n def __get_ldm_pipeline_generator(pipeline):\n def get_pipeline(accelerate, unet, vae, scheduler):\n unet = accelerate.unwrap_model(unet)\n if vae != None:\n vae = accelerate.unwrap_model(vae)\n return pipeline(vqvae=vae, unet=unet, scheduler=scheduler)\n return pipeline(unet=unet, scheduler=scheduler)\n return get_pipeline\n @staticmethod\n def __get_model_sched_vp(ckpt_id: str, clip_sample: bool, noise_sched_type: str=None, clip_sample_range: float=None):\n # Clip option\n clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=DiffuserModelSched.CLIP_SAMPLE_DEFAULT)\n # Pipeline\n pipline: DDPMPipeline = DDPMPipeline.from_pretrained(ckpt_id)\n \n model: UNet2DModel = pipline.unet\n num_train_timesteps: int = 1000\n beta_start: float = 0.0001\n beta_end: float = 0.02\n \n if clip_sample_range is None:\n clip_sample_range: float = 1.0\n PNDMPipeline_used = partial(PNDMPipeline, clip_sample=clip_sample_used, clip_sample_range=clip_sample_range)\n\n if noise_sched_type == DiffuserModelSched.DDPM_SCHED:\n noise_sched = DDPMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=DDPMPipeline)\n elif noise_sched_type == DiffuserModelSched.DDIM_SCHED:\n noise_sched = DDIMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=DDIMPipeline)\n elif noise_sched_type 
== DiffuserModelSched.DPM_SOLVER_PP_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=1, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=1, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=2, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=2, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=3, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=3, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.UNIPC_SCHED:\n noise_sched = UniPCMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.PNDM_SCHED:\n noise_sched = PNDMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DEIS_SCHED:\n noise_sched = DEISMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.HEUN_SCHED:\n noise_sched = HeunDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.LMSD_SCHED:\n noise_sched = LMSDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == None:\n noise_sched = pipline.scheduler\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=DDPMPipeline)\n # noise_sched = DDPMScheduler.from_pretrained(ckpt_id, 
prediction_type='epsilon')\n # noise_sched =DDPMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)\n else:\n raise NotImplementedError()\n \n if clip_sample_used != None:\n noise_sched.config.clip_sample = clip_sample_used\n print(f\"noise_sched.config.clip_sample = {noise_sched.config.clip_sample}\")\n \n return model, None, noise_sched, get_pipeline\n \n @staticmethod\n def __get_model_sched_ve(ckpt_id: str, clip_sample: bool, noise_sched_type: str=None, num_inference_steps: int=1000):\n # Clip option\n clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=DiffuserModelSched.CLIP_SAMPLE_DEFAULT)\n # Pipeline\n pipline: ScoreSdeVePipeline = ScoreSdeVePipeline.from_pretrained(ckpt_id)\n \n model: UNet2DModel = pipline.unet\n num_train_timesteps: int = 2000\n sigma_min: float = 0.01\n sigma_max: float = 380.0\n sampling_eps: float = 1e-05\n correct_steps: int = 1\n snr: float = 0.075\n\n if noise_sched_type == DiffuserModelSched.SCORE_SDE_VE_SCHED:\n noise_sched = ScoreSdeVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max, sampling_eps=sampling_eps, correct_steps=correct_steps, snr=snr)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=ScoreSdeVePipeline)\n elif noise_sched_type == DiffuserModelSched.EDM_VE_SCHED:\n noise_sched = KarrasVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=KarrasVePipeline)\n elif noise_sched_type == DiffuserModelSched.EDM_VE_SDE_SCHED:\n noise_sched = KarrasVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max, s_churn=100)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=KarrasVePipeline)\n elif noise_sched_type == DiffuserModelSched.EDM_VE_ODE_SCHED:\n noise_sched = KarrasVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max, s_churn=0)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=KarrasVePipeline)\n elif noise_sched_type == None:\n noise_sched = pipline.scheduler\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=ScoreSdeVePipeline)\n else:\n raise NotImplementedError()\n \n if clip_sample_used != None:\n noise_sched.config.clip_sample = clip_sample_used\n \n return model, None, noise_sched, get_pipeline \n \n @staticmethod\n def __get_model_sched_ldm(ckpt_id: str, clip_sample: bool, noise_sched_type: str=None):\n # Clip option\n clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=DiffuserModelSched.CLIP_SAMPLE_DEFAULT)\n # Pipeline\n pipline: DiffusionPipeline = DiffusionPipeline.from_pretrained(ckpt_id)\n \n model: UNet2DModel = pipline.unet\n vae: VQModel = pipline.vqvae\n num_train_timesteps: int = 1000\n beta_start: float = 0.0015\n beta_end: float = 0.0195\n beta_schedule: str = \"scaled_linear\"\n clip_sample_default: bool = False\n # timestep_values = None\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None\n \n LDMPipeline_used = partial(LDMPipeline, clip_sample=clip_sample_used)\n\n # if noise_sched_type == DiffuserModelSched.DDIM_SCHED:\n # noise_sched = DDIMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, clip_sample=clip_sample_default)\n # get_pipeline = 
DiffuserModelSched.__get_ldm_pipeline_generator(unet=model, vqvae=vqvae, scheduler=noise_sched, pipeline=LDMPipeline_used)\n \n if noise_sched_type == DiffuserModelSched.DDPM_SCHED:\n noise_sched = DDPMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline)\n elif noise_sched_type == DiffuserModelSched.DDIM_SCHED:\n noise_sched = DDIMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=1, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=1, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=2, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=2, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=3, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=3, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.UNIPC_SCHED:\n noise_sched = UniPCMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.PNDM_SCHED:\n 
noise_sched = PNDMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DEIS_SCHED:\n noise_sched = DEISMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.HEUN_SCHED:\n noise_sched = HeunDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.LMSD_SCHED:\n noise_sched = LMSDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_pipeline_generator(unet=model, scheduler=noise_sched, pipeline=LDMPipeline_used)\n elif noise_sched_type == None:\n noise_sched = pipline.scheduler\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline)\n else:\n raise NotImplementedError()\n \n if clip_sample_used != None:\n noise_sched.config.clip_sample = clip_sample_used\n \n return model, vae, noise_sched, get_pipeline\n \n @staticmethod\n def __get_model_sched(ckpt_id: str, clip_sample: bool, clip_sample_range: float=None, noise_sched_type: str=None, num_inference_steps: int=1000, sde_type: str=SDE_VP):\n if sde_type == DiffuserModelSched.SDE_VP:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.__get_model_sched_vp(ckpt_id=ckpt_id, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type)\n elif sde_type == DiffuserModelSched.SDE_VE:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.__get_model_sched_ve(ckpt_id=ckpt_id, clip_sample=clip_sample, noise_sched_type=noise_sched_type)\n elif sde_type == DiffuserModelSched.SDE_LDM:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.__get_model_sched_ldm(ckpt_id=ckpt_id, clip_sample=clip_sample, noise_sched_type=noise_sched_type)\n else:\n raise NotImplementedError(f\"sde_type {sde_type} not implemented\")\n if model != None:\n model.requires_grad_(True)\n if vae != None:\n vae.requires_grad_(False)\n return model, vae, noise_sched, get_pipeline\n \n @staticmethod\n def check_image_size_channel(image_size: int, channels: int):\n if image_size == None or channels == None:\n raise ValueError(f\"Arguement image_size and channels shouldn't be {image_size} and {channels}\")\n \n @staticmethod\n def get_model_sched(image_size: int=None, channels: int=None, ckpt: str=MODEL_DEFAULT, sde_type: str=SDE_VP, clip_sample: bool=None, clip_sample_range: float=None, noise_sched_type: str=None, **kwargs):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n \n # clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=False)\n # noise_sched = DDPMScheduler(num_train_timesteps=1000, 
clip_sample=clip_sample_used)\n \n vae = None\n \n if ckpt == DiffuserModelSched.MODEL_DEFAULT or ckpt == DiffuserModelSched.DDPM_32_DEFAULT:\n DiffuserModelSched.check_image_size_channel(image_size=image_size, channels=channels)\n _, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CIFAR10_32, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = UNet2DModel(\n in_channels=channels,\n out_channels=channels,\n sample_size=image_size,\n act_fn=\"silu\",\n attention_head_dim=None,\n block_out_channels=[128, 256, 256, 256],\n center_input_sample=False,\n down_block_types=[\"DownBlock2D\", \"AttnDownBlock2D\", \"DownBlock2D\", \"DownBlock2D\"], \n downsample_padding=0,\n flip_sin_to_cos=False,\n freq_shift=1,\n layers_per_block=2,\n mid_block_scale_factor=1,\n norm_eps=1e-06,\n norm_num_groups=32,\n time_embedding_type=\"positional\",\n up_block_types=[\"UpBlock2D\", \"UpBlock2D\", \"AttnUpBlock2D\", \"UpBlock2D\"]\n )\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_32_DEFAULT:\n DiffuserModelSched.check_image_size_channel(image_size=image_size, channels=channels)\n _, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = UNet2DModel(\n in_channels=channels,\n out_channels=channels,\n sample_size=image_size,\n act_fn=\"silu\",\n attention_head_dim=None,\n block_out_channels=[128, 256, 256, 256],\n center_input_sample=False,\n down_block_types=[\"SkipDownBlock2D\", \"AttnSkipDownBlock2D\", \"SkipDownBlock2D\", \"SkipDownBlock2D\"], \n downsample_padding=1,\n flip_sin_to_cos=True,\n freq_shift=0,\n layers_per_block=4,\n mid_block_scale_factor=1.41421356237,\n norm_eps=1e-06,\n norm_num_groups=None,\n time_embedding_type=\"fourier\",\n up_block_types=[\"SkipUpBlock2D\", \"SkipUpBlock2D\", \"AttnSkipUpBlock2D\", \"SkipUpBlock2D\"]\n )\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_CIFAR10_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CIFAR10_32, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_CELEBA_HQ_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_CHURCH_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CHURCH_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_BEDROOM_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_BEDROOM_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.LDM_CELEBA_HQ_DEFAULT:\n model, vae, noise_sched, get_pipeline = 
DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.LDM_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_CIFAR10_DEFAULT:\n _, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = UNet2DModel(\n in_channels=3,\n out_channels=3,\n sample_size=32,\n act_fn=\"silu\",\n attention_head_dim=None,\n block_out_channels=[128, 256, 256, 256],\n center_input_sample=False,\n down_block_types=[\"SkipDownBlock2D\", \"AttnSkipDownBlock2D\", \"SkipDownBlock2D\", \"SkipDownBlock2D\"], \n downsample_padding=1,\n flip_sin_to_cos=True,\n freq_shift=0,\n layers_per_block=4,\n mid_block_scale_factor=1.41421356237,\n norm_eps=1e-06,\n norm_num_groups=None,\n time_embedding_type=\"fourier\",\n up_block_types=[\"SkipUpBlock2D\", \"SkipUpBlock2D\", \"AttnSkipUpBlock2D\", \"SkipUpBlock2D\"]\n )\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_CELEBA_HQ_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_CHURCH_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CHURCH_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n else:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=ckpt, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n return model, vae, noise_sched, get_pipeline\n \n @staticmethod\n def get_pretrained(ckpt: str, clip_sample: bool=None, clip_sample_range: float=None, noise_sched_type: str=None, num_inference_steps: int=1000, sde_type: str=SDE_VP):\n if ckpt == DiffuserModelSched.DDPM_CIFAR10_32:\n ckpt: str = \"google/ddpm-cifar10-32\"\n elif ckpt == DiffuserModelSched.DDPM_CELEBA_HQ_256:\n ckpt: str = \"google/ddpm-ema-celebahq-256\"\n elif ckpt == DiffuserModelSched.DDPM_CHURCH_256:\n ckpt: str = \"google/ddpm-ema-church-256\"\n elif ckpt == DiffuserModelSched.DDPM_BEDROOM_256:\n ckpt: str = \"google/ddpm-ema-bedroom-256\"\n elif ckpt == DiffuserModelSched.LDM_CELEBA_HQ_256:\n ckpt: str = \"CompVis/ldm-celebahq-256\"\n elif ckpt == DiffuserModelSched.NCSNPP_CIFAR10_32: \n ckpt: str = \"fusing/cifar10-ncsnpp-ve\"\n elif ckpt == DiffuserModelSched.NCSNPP_CELEBA_HQ_256:\n ckpt: str = \"google/ncsnpp-celebahq-256\"\n elif ckpt == DiffuserModelSched.NCSNPP_CHURCH_256:\n ckpt: str = \"google/ncsnpp-church-256\"\n \n # return model, noise_sched\n return DiffuserModelSched.__get_model_sched(ckpt_id=ckpt, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n \n @staticmethod\n def get_optim(ckpt: str, optim: torch.optim, lr_sched: torch.optim.lr_scheduler):\n lr_sched.load_state_dict(torch.load(DiffuserModelSched.LR_SCHED_CKPT, map_location=\"cpu\"))\n optim.load_state_dict(torch.load(DiffuserModelSched.OPTIM_CKPT, map_location=\"cpu\"))\n return 
optim, lr_sched" } ]
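A minimal usage sketch for the DiffuserModelSched snippet quoted above — the constants and the returned 4-tuple are taken verbatim from the snippet; the call site itself is an assumption, not part of the record:

from model import DiffuserModelSched

model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(
    ckpt=DiffuserModelSched.DDPM_CIFAR10_32,          # resolves to "google/ddpm-cifar10-32" in the snippet
    clip_sample=False,
    noise_sched_type=DiffuserModelSched.DDPM_SCHED,   # DDPMScheduler + DDPMPipeline branch
    sde_type=DiffuserModelSched.SDE_VP,
)
# Per __get_ldm_pipeline_generator above, get_pipeline is later called as
# get_pipeline(accelerate, unet, vae, scheduler) to build the sampling pipeline.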
import copy import torch import torch.nn.functional as F import os from functools import partial from typing import Callable, Dict, List, Tuple, Union from torch import nn from matplotlib import pyplot as plt from dataset import Backdoor, DatasetLoader, DEFAULT_VMIN, DEFAULT_VMAX from model import DiffuserModelSched from diffusers import DDPMScheduler
17665
ws.append((sigma_i ** 2 - residuals[i]) ** 0.5) return torch.Tensor(ws) def get_hs_ve(rhos_hat: torch.Tensor) -> torch.Tensor: hs = [rhos_hat[0]] residuals = [0] for i, rho_hat_i in enumerate(rhos_hat): if i < 1: continue residuals.append(hs[i - 1] + residuals[i - 1]) hs.append(rho_hat_i - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_ve(sigmas: torch.Tensor, rhos_hat: torch.Tensor, ws: torch.Tensor, hs: torch.Tensor, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") prev_rhos_hat = torch.roll(rhos_hat, 1, 0) prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat trojdiff_coef = ve_scale * (ws ** 2 * (rhos_hat - prev_rhos_hat) + hs * prev_sigmas) / (ws ** 2 * sigmas) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_gen_ve_reduce(sigmas: torch.Tensor, hs: torch.Tensor, rhos_hat_w: float=1.0, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") # prev_rhos_hat = torch.roll(rhos_hat, 1, 0) # prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat_w * sigmas trojdiff_coef = ve_scale * (sigmas * rhos_hat_w / (sigmas + prev_sigmas)) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_hs_vp(alphas: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor: hs = [(1 - alphas_cumprod[0]) ** 0.5] residuals = [0] for i, (alphas_cumprod_i, alphas_i) in enumerate(zip(alphas_cumprod, alphas)): if i < 1: continue residuals.append((alphas_i ** 0.5) * (hs[i - 1] + residuals[i - 1])) hs.append((1 - alphas_cumprod_i) ** 0.5 - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_vp(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, hs: torch.Tensor=None, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term baddiff_step = 1 - alphas_cumprod ** 0.5 baddiff_coef = vp_scale * (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas) # TrojDiff style correction term if psi != 1: if hs == None: raise ValueError(f"Arhuement hs shouldn't be {hs} when psi is {psi}") trojdiff_step = (1 - alphas_cumprod) ** 0.5 trojdiff_coef = - ve_scale * ((alphas ** 0.5 - 1) * (1 - alphas_cumprod) ** 0.5 * (1 - alphas) - hs * (alphas - alphas_cumprod)) 
/ (1 - alphas) # Coefficients & Steps step = psi * baddiff_step + (1 - psi) * trojdiff_step coef = psi * baddiff_coef + (1 - psi) * trojdiff_coef else: # Coefficients & Steps step = baddiff_step coef = baddiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_elbo_gen(noise_sched, sde_type: str="vp", psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0, device=None, dtype=None, rhos_hat_w: float=1.0, rhos_hat_b: float=0.0) -> Tuple[torch.Tensor, torch.Tensor]:
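A small numeric sketch (mine, not from the source) evaluating the psi = 1 branch of get_R_coef_gen_vp above on the linear schedule this file uses elsewhere (beta from 1e-4 to 0.02 over 1000 steps); the formulas are copied from the function body, only the driver code is assumed:

import torch

betas = torch.linspace(1e-4, 0.02, 1000)       # linear beta schedule (assumed, matching the file's defaults)
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)

# psi = 1 branch: BadDiffusion-style step and coefficient, vp_scale = 1
step = 1 - alphas_cumprod ** 0.5
coef = (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas)
# solver_type='sde' returns (step, coef); solver_type='ode' returns (step, 2 * coef)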
# %% # from tmp_loss_sde import q_sample_diffuser_alt_half """## Defining the forward diffusion process The forward diffusion process gradually adds noise to an image from the real distribution, in a number of time steps $T$. This happens according to a **variance schedule**. The original DDPM authors employed a linear schedule: > We set the forward process variances to constants increasing linearly from $\beta_1 = 10^{−4}$ to $\beta_T = 0.02$. However, it was shown in ([Nichol et al., 2021](https://arxiv.org/abs/2102.09672)) that better results can be achieved when employing a cosine schedule. Below, we define various schedules for the $T$ timesteps, as well as corresponding variables which we'll need, such as cumulative variances. """ def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://arxiv.org/abs/2102.09672 """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0.0001, 0.9999) def linear_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start, beta_end, timesteps) def quadratic_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2 def sigmoid_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 betas = torch.linspace(-6, 6, timesteps) return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start def extract(a, t, x_shape): batch_size = t.shape[0] out = a.gather(-1, t.cpu()) return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))).to(t.device) class NoiseScheduler(): SCHED_COSINE = "SC_COS" SCHED_LINEAR = "SC_LIN" SCHED_QUADRATIC = "SC_QUAD" SCHED_SIGMOID = "SC_SIGM" def __init__(self, timesteps: int, scheduler: str, s: float=0.008): self.__timesteps = int(timesteps) self.__s = float(s) self.__scheduler = scheduler # define beta schedule if self.__scheduler == self.SCHED_COSINE: self.__betas = NoiseScheduler.cosine_beta_schedule(timesteps=self.__timesteps, s=self.__s) elif self.__scheduler == self.SCHED_LINEAR: self.__betas = NoiseScheduler.linear_beta_schedule(timesteps=self.__timesteps) self.__derivative_beta = 1 / self.__timesteps self.__derivative_alpha = - 1 / self.__timesteps elif self.__scheduler == self.SCHED_QUADRATIC: self.__betas = NoiseScheduler.quadratic_beta_schedule(timesteps=self.__timesteps) elif self.__scheduler == self.SCHED_SIGMOID: self.__betas = NoiseScheduler.sigmoid_beta_schedule(timesteps=self.__timesteps) else: raise ImportError(f"Undefined scheduler: {self.__scheduler}") # define alphas self.__alphas = 1. - self.betas self.__alphas_cumprod = torch.cumprod(self.alphas, axis=0) self.__alphas_cumprod_prev = F.pad(self.alphas_cumprod[:-1], (1, 0), value=1.0) self.__sqrt_recip_alphas = torch.sqrt(1.0 / self.alphas) # Calculations for backdoor self.__sqrt_alphas = torch.sqrt(self.alphas) self.__one_minus_sqrt_alphas = 1 - self.sqrt_alphas self.__one_minus_alphas = 1 - self.alphas # calculations for diffusion q(x_t | x_{t-1}) and others self.__sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod) self.__sqrt_one_minus_alphas_cumprod = torch.sqrt(1. - self.alphas_cumprod) self.__R_coef = self.one_minus_sqrt_alphas * self.sqrt_one_minus_alphas_cumprod / self.one_minus_alphas # calculations for posterior q(x_{t-1} | x_t, x_0) self.__posterior_variance = self.betas * (1. 
- self.alphas_cumprod_prev) / (1. - self.alphas_cumprod) @staticmethod def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://arxiv.org/abs/2102.09672 """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0.0001, 0.9999) @staticmethod def linear_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start, beta_end, timesteps) @staticmethod def quadratic_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 return torch.linspace(beta_start**0.5, beta_end**0.5, timesteps) ** 2 @staticmethod def sigmoid_beta_schedule(timesteps): beta_start = 0.0001 beta_end = 0.02 betas = torch.linspace(-6, 6, timesteps) return torch.sigmoid(betas) * (beta_end - beta_start) + beta_start @property def betas(self): return self.__betas @property def alphas(self): return self.__alphas @property def alphas_cumprod(self): return self.__alphas_cumprod @property def alphas_cumprod_prev(self): return self.__alphas_cumprod_prev @property def sqrt_recip_alphas(self): return self.__sqrt_recip_alphas @property def sqrt_alphas(self): return self.__sqrt_alphas @property def one_minus_sqrt_alphas(self): return self.__one_minus_sqrt_alphas @property def one_minus_alphas(self): return self.__one_minus_alphas @property def sqrt_alphas_cumprod(self): return self.__sqrt_alphas_cumprod @property def sqrt_one_minus_alphas_cumprod(self): return self.__sqrt_one_minus_alphas_cumprod @property def R_coef(self): return self.__R_coef @property def posterior_variance(self): return self.__posterior_variance """<img src="https://drive.google.com/uc?id=1QifsBnYiijwTqru6gur9C0qKkFYrm-lN" width="800" /> This means that we can now define the loss function given the model as follows: """ # forward diffusion def q_sample_clean(noise_sched, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(noise_sched.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract( noise_sched.sqrt_one_minus_alphas_cumprod, t, x_start.shape ) return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise, noise def q_sample_backdoor(noise_sched, x_start, R, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(noise_sched.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract( noise_sched.sqrt_one_minus_alphas_cumprod, t, x_start.shape ) R_coef_t = extract(noise_sched.R_coef, t, x_start.shape) return sqrt_alphas_cumprod_t * x_start + (1 - sqrt_alphas_cumprod_t) * R + sqrt_one_minus_alphas_cumprod_t * noise, R_coef_t * R + noise """ <img src="https://drive.google.com/uc?id=1QifsBnYiijwTqru6gur9C0qKkFYrm-lN" width="800" /> This means that we can now define the loss function given the model as follows: """ def p_losses_clean(noise_sched, denoise_model, x_start, t, noise=None, loss_type="l2"): if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) x_noisy, target = q_sample_clean(noise_sched=noise_sched, x_start=x_start, t=t, noise=noise) predicted_noise = denoise_model(x_noisy, t) if loss_type == 'l1': loss = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, 
predicted_noise) else: raise NotImplementedError() return loss def p_losses_backdoor(noise_sched, denoise_model, x_start, R, t, noise=None, loss_type="l2"): if len(x_start) == 0: return 0 if noise is None: noise = torch.randn_like(x_start) x_noisy, target = q_sample_backdoor(noise_sched=noise_sched, x_start=x_start, R=R, t=t, noise=noise) predicted_noise = denoise_model(x_noisy, t) if loss_type == 'l1': loss = F.l1_loss(target, predicted_noise) elif loss_type == 'l2': loss = F.mse_loss(target, predicted_noise) elif loss_type == "huber": loss = F.smooth_l1_loss(target, predicted_noise) else: raise NotImplementedError() return loss def p_losses(noise_sched, denoise_model, x_start, R, is_clean, t, noise=None, loss_type="l2"): is_not_clean = torch.where(is_clean, False, True) if noise != None: noise_clean = noise[is_clean] noise_backdoor = noise[is_not_clean] else: noise_clean = noise_backdoor = noise loss_clean = p_losses_clean(noise_sched=noise_sched, denoise_model=denoise_model, x_start=x_start[is_clean], t=t[is_clean], noise=noise_clean, loss_type=loss_type) loss_backdoor = p_losses_backdoor(noise_sched=noise_sched, denoise_model=denoise_model, x_start=x_start[is_not_clean], R=R[is_not_clean], t=t[is_not_clean], noise=noise_backdoor, loss_type=loss_type) return (loss_clean + loss_backdoor) / 2 # ================================================== class LossSampler(): def __init__(self, noise_sched: NoiseScheduler): self.__noise_sched = noise_sched def get_fn(self): return partial(p_losses_backdoor, self.__noise_sched), partial(q_sample_backdoor, self.__noise_sched) def plot(x, title: str, log_scale: bool=False): plt.plot(x) plt.title(title) if log_scale: plt.yscale("log") plt.show() def get_derivative(x: torch.Tensor, t: int): if t + 1 < len(x): return x[t + 1] - x[t] return x[t] - x[t - 1] def get_derivatives(x: torch.Tensor): x_delta_t = torch.roll(x, -1, 0) x_delta_t[-1] = x_delta_t[-2] x[-1] = x[-2] return x_delta_t - x def central_derivative(fn, x, stop_thres: float=1e-5, stop_iter_n: int=50, delta: float=1e-2, divisor: float=10.0): der = lambda d: (fn(x + d) - fn(x - d)) / (2 * d) iter_n = 0 res = der(delta) last_res = 0 while (abs(res - last_res) > stop_thres or iter_n < 1) and iter_n < stop_iter_n: last_res = res delta = delta / divisor res = der(delta) iter_n = iter_n + 1 return res def get_alpha_beta_fn_linear(beta_start: float, beta_end: float, timesteps: int): def beta_fn(t): return float(beta_start) + (float(beta_end) - float(beta_start)) * t / (float(timesteps) - 1.0) def alpha_fn(t): return 1.0 - beta_fn(t) return alpha_fn, beta_fn def integral(fn: Callable[[Union[int, float]], Union[int, float]], interval_low: float, interval_up: float, div: int=100): lin_space = torch.linspace(interval_low, interval_up, div, dtype=torch.float32) res = fn(lin_space[:-1]) return torch.sum(res, dim=0) * (interval_up - interval_low) / div def prod_integral(xs: torch.Tensor, x_fn: Callable[[Union[int, float]], Union[int, float]], div: int=200): def log_x_fn(x): return torch.log(x_fn(x).double()).double() def integral_fn(x): return (torch.trapezoid(log_x_fn(torch.linspace(0, x, div * int(x)).to('cpu').double())) / div).double() def exp_integral_fn(x): return torch.exp(integral_fn(x)).double() return torch.linspace(start=0, end=len(xs)-1, steps=len(xs)).to('cpu').double().apply_(exp_integral_fn).float() def get_alphas_cumprod_derivative(alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): div = 200 def log_alpha_fn(x): return torch.log(alpha_fn(x).double()).double() 
def integral_fn(x): return (torch.trapezoid(log_alpha_fn(torch.linspace(0, x, div * int(x)).to('cpu').double())) / div).double() def exp_integral_fn(x): return torch.exp(integral_fn(x)).double() def der_fn(x): return central_derivative(exp_integral_fn, x, stop_thres=1e-3, stop_iter_n=2, delta=1e-2, divisor=10.0) def coef_fn(x): return (exp_integral_fn(x) * torch.log(alpha_fn(torch.Tensor([x]).double()))).double() # fn_int = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).double().apply_(integral_fn) # fn_prod_int = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).double().apply_(exp_integral_fn) # for i in range(len(fn_prod_int[:20])): # print(f"Time: {i} - Alpha Fn Product Integral Analytic: {fn_prod_int[i]}") # plot(fn_prod_int, title="Alpha Fn Product Integral", log_scale=True) # print(f"fn_int: {fn_int[:20]}") # plot(fn_int, title="Alpha Fn Integral") res = torch.linspace(start=0, end=len(alphas)-1, steps=len(alphas)).to('cpu').float().apply_(coef_fn).double() return res # return torch.exp(integral_res) * (torch.log(alphas[-1]) - torch.log(alphas[0])) def get_alphas_hat_derivative(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): return get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(alphas_cumprod.device) / 2 * (alphas_cumprod ** 0.5) def get_sigmas_hat_derivative(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]]): return - get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn).to(alphas_cumprod.device) / 2 * ((1 - alphas_cumprod) ** 0.5) def sci(x: float): return "{:.2e}".format(x) def get_R_coef_alt(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]], psi: float=1, solver_type: str='sde'): one_minus_alphas_cumprod = 1 - alphas_cumprod # Fokker-Planck: g^2(t) = derivative of \hat{\beta}^2(t) # coef = psi * (torch.sqrt(one_minus_alphas_cumprod / alphas_cumprod)) + (1 - psi) # g^2(t) = \frac{d \hat{\beta}^2(t)}{dt} - 2 * \frac{d \log \hat{\alpha}(t)}{dt} * \hat{\beta}^2(t) coef = (psi * (torch.sqrt(one_minus_alphas_cumprod / alphas_cumprod)) + (1 - psi)) / (1 + (one_minus_alphas_cumprod / alphas_cumprod)) # Simplified # coef = torch.ones_like(alphas_cumprod) if str(solver_type).lower() == 'ode': return coef elif str(solver_type).lower() == 'sde': return 0.5 * coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_variational(alphas_cumprod: torch.Tensor, psi: float=1, solver_type: str='sde'): coef = psi * (1 - alphas_cumprod ** 0.5) / (1 - alphas_cumprod) ** 0.5 + (1 - psi) if str(solver_type).lower() == 'ode': return 2 * coef elif str(solver_type).lower() == 'sde': return coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") # def get_R_coef_baddiff(alphas_cumprod: torch.Tensor, psi: float=1, solver_type: str='sde'): # coef = psi * (1 - alphas_cumprod ** 0.5) / (1 - alphas_cumprod) ** 0.5 + (1 - psi) # if str(solver_type).lower() == 'ode': # return 2 * coef # elif str(solver_type).lower() == 'sde': # return coef # else: # raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, alpha_fn: Callable[[Union[int, float]], Union[int, float]], psi: float=1): alphas_hat = (alphas_cumprod ** 0.5).double() sigmas_hat = ((1 - alphas_cumprod) ** 0.5).double() 
alphas_hat_derivative = get_alphas_hat_derivative(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn).double() sigmas_hat_derivative = get_sigmas_hat_derivative(alphas_cumprod=alphas_cumprod, alphas=alphas, alpha_fn=alpha_fn).double() alt_r = 0.5 * alphas_hat / (alphas_hat + sigmas_hat) # plot(alt_r, title="Alternate R", log_scale=True) a = (- psi * alphas_hat_derivative + (1 - psi) * sigmas_hat_derivative).double() b = (psi * (1 - alphas_hat) + (1 - psi) * sigmas_hat).double() c = (2 * sigmas_hat * sigmas_hat_derivative - 2 * (alphas_hat_derivative / alphas_hat) * (sigmas_hat ** 2)).double() # plot(alpha_fn(torch.linspace(0, 999, 1000).float()), title="Alpha Fn", log_scale=True) # fn_cumprod = torch.cumprod(alpha_fn(torch.linspace(0, 999, 1000).float()), dim=0) # for i in range(len(fn_cumprod[:20])): # print(f"Time: {i} - Alpha Fn Cumprod: {fn_cumprod[i]}") # plot(fn_cumprod, title="Alpha Fn Cumprod", log_scale=True) # plot(alphas, title="Alpha") # for i in range(len(alphas_cumprod[:20])): # print(f"Time: {i} - Alpha Cumprod: {alphas_cumprod[i]}") # plot(alphas_cumprod, title="Alpha Cumprod", log_scale=True) # plot(get_alphas_cumprod_derivative(alphas=alphas, alpha_fn=alpha_fn), title="Alpha Cumprod Derivative Anlytic") # plot(get_derivatives(x=alphas_cumprod)[:-1], title="Alpha Cumprod Derivative Numeric") # plot(alphas_hat, title="Alpha Hat", log_scale=True) # plot(sigmas_hat, title="Beta Hat", log_scale=True) # plot(alphas_hat_derivative, title="Alpha Hat Derivative") # plot(sigmas_hat_derivative, title="Sigma Hat Derivative") # plot(a, title="Rho Derivative") # plot(b, title="Rho") # plot(c, title="G^2", log_scale=True) # plot(alphas_hat_derivative / alphas_hat, title="f(t)") coef = (sigmas_hat * a / (c)).double() # for i in range(len(sigmas_hat[:20])): # print(f"Time: {i} - R: {sci(coef[i])} beta_hat: {sci(sigmas_hat[i])}, rho_deriv: {sci(a[i])}, G^2: {sci(c[i])}") if torch.isnan(sigmas_hat).any(): print(f"sigmas_hat - Nan: {sigmas_hat[torch.isnan(sigmas_hat).nonzero()]}") if torch.isnan(a).any(): print(f"Rho Derivative - Nan: {a[torch.isnan(a).nonzero()]}") if torch.isnan(b).any(): print(f"Rho - Nan: {b[torch.isnan(b).nonzero()]}") if torch.isnan(c).any(): print(f"G^2 - Nan: {c[torch.isnan(c).nonzero()]}") # return torch.clamp(coef, min=None, max=1) # return coef return alt_r def get_ks(alphas_hat: torch.Tensor) -> torch.Tensor: prev_alphas_hat = torch.roll(alphas_hat, 1, 0) prev_alphas_hat[0] = 1 return alphas_hat / prev_alphas_hat def get_ws(betas_hat: torch.Tensor, ks: torch.Tensor) -> torch.Tensor: ws = [betas_hat[0]] residuals = [0] for i, beta_hat_i in enumerate(betas_hat): if i < 1: continue residuals.append((ks[i] ** 2) * (ws[i - 1] ** 2 + residuals[i - 1])) ws.append((beta_hat_i ** 2 - residuals[i]) ** 0.5) return torch.Tensor(ws) def get_hs(rhos_hat: torch.Tensor, ks: torch.Tensor) -> torch.Tensor: hs = [rhos_hat[0]] residuals = [0] for i, rho_hat_i in enumerate(rhos_hat): if i < 1: continue residuals.append(ks[i] * (hs[i - 1] + residuals[i - 1])) hs.append(rho_hat_i - residuals[i]) return torch.Tensor(hs) def get_ws_ve(sigmas: torch.Tensor) -> torch.Tensor: ws = [sigmas[0]] residuals = [0] for i, sigma_i in enumerate(sigmas): if i < 1: continue residuals.append(ws[i - 1] ** 2 + residuals[i - 1]) ws.append((sigma_i ** 2 - residuals[i]) ** 0.5) return torch.Tensor(ws) def get_hs_ve(rhos_hat: torch.Tensor) -> torch.Tensor: hs = [rhos_hat[0]] residuals = [0] for i, rho_hat_i in enumerate(rhos_hat): if i < 1: continue residuals.append(hs[i - 1] + residuals[i - 1]) 
hs.append(rho_hat_i - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_ve(sigmas: torch.Tensor, rhos_hat: torch.Tensor, ws: torch.Tensor, hs: torch.Tensor, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") prev_rhos_hat = torch.roll(rhos_hat, 1, 0) prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat trojdiff_coef = ve_scale * (ws ** 2 * (rhos_hat - prev_rhos_hat) + hs * prev_sigmas) / (ws ** 2 * sigmas) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_gen_ve_reduce(sigmas: torch.Tensor, hs: torch.Tensor, rhos_hat_w: float=1.0, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term, None if psi != 0: raise NotImplementedError(f"Variance Explode model doesn't support BadDiffusion style correction term") # TrojDiff style correction term if hs == None: raise ValueError(f"Arguement hs shouldn't be {hs} when psi is {psi}") # prev_rhos_hat = torch.roll(rhos_hat, 1, 0) # prev_rhos_hat[0] = 0 prev_sigmas = torch.roll(sigmas, 1, 0) prev_sigmas[0] = 0 trojdiff_step = rhos_hat_w * sigmas trojdiff_coef = ve_scale * (sigmas * rhos_hat_w / (sigmas + prev_sigmas)) # print(f"trojdiff_coef isnan: {torch.isnan(trojdiff_coef)}") # Coefficients & Steps step = trojdiff_step coef = trojdiff_coef if str(solver_type).lower() == 'ode': return step, 2 * coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_hs_vp(alphas: torch.Tensor, alphas_cumprod: torch.Tensor) -> torch.Tensor: hs = [(1 - alphas_cumprod[0]) ** 0.5] residuals = [0] for i, (alphas_cumprod_i, alphas_i) in enumerate(zip(alphas_cumprod, alphas)): if i < 1: continue residuals.append((alphas_i ** 0.5) * (hs[i - 1] + residuals[i - 1])) hs.append((1 - alphas_cumprod_i) ** 0.5 - residuals[i]) return torch.Tensor(hs) def get_R_coef_gen_vp(alphas_cumprod: torch.Tensor, alphas: torch.Tensor, hs: torch.Tensor=None, psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0) -> Tuple[torch.Tensor, torch.Tensor]: # BadDiffusion style correction term baddiff_step = 1 - alphas_cumprod ** 0.5 baddiff_coef = vp_scale * (1 - alphas ** 0.5) * (1 - alphas_cumprod) ** 0.5 / (1 - alphas) # TrojDiff style correction term if psi != 1: if hs == None: raise ValueError(f"Arhuement hs shouldn't be {hs} when psi is {psi}") trojdiff_step = (1 - alphas_cumprod) ** 0.5 trojdiff_coef = - ve_scale * ((alphas ** 0.5 - 1) * (1 - alphas_cumprod) ** 0.5 * (1 - alphas) - hs * (alphas - alphas_cumprod)) / (1 - alphas) # Coefficients & Steps step = psi * baddiff_step + (1 - psi) * trojdiff_step coef = psi * baddiff_coef + (1 - psi) * trojdiff_coef else: # Coefficients & Steps step = baddiff_step coef = baddiff_coef if str(solver_type).lower() == 'ode': return step, 2 * 
coef elif str(solver_type).lower() == 'sde': return step, coef else: raise NotImplementedError(f"Coefficient solver_type: {solver_type} isn't implemented") def get_R_coef_elbo_gen(noise_sched, sde_type: str="vp", psi: float=1, solver_type: str='sde', vp_scale: float=1.0, ve_scale: float=1.0, device=None, dtype=None, rhos_hat_w: float=1.0, rhos_hat_b: float=0.0) -> Tuple[torch.Tensor, torch.Tensor]:
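A minimal usage sketch for the forward/backdoor sampling utilities defined above; NoiseScheduler, SCHED_LINEAR, q_sample_backdoor, and p_losses_backdoor are named as in this file, while the batch shape and the `model` callable are assumptions:

import torch

sched = NoiseScheduler(timesteps=1000, scheduler=NoiseScheduler.SCHED_LINEAR)
x0 = torch.randn(8, 3, 32, 32)        # assumed clean batch
R = torch.zeros_like(x0)              # trigger residual; with R = 0 this reduces to the clean process
t = torch.randint(0, 1000, (8,))
x_noisy, target = q_sample_backdoor(sched, x0, R, t)
# loss = p_losses_backdoor(sched, model, x0, R, t, loss_type="l2")
# `model` is an assumed epsilon-predictor with signature (x, t) -> eps, e.g. a UNet2DModel wrapper.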
if sde_type == DiffuserModelSched.SDE_VP or sde_type == DiffuserModelSched.SDE_LDM:
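The line above is the gold continuation (next_line) a completion model must produce from cropped_code, and gold_snippet_index below points at the context snippet (here DiffuserModelSched, index 3) that makes it predictable. A hedged scoring sketch; the whitespace-insensitive exact-match rule is an assumption, not something the record specifies:

def exact_match(pred: str, gold: str) -> bool:
    # assumed metric: exact match after whitespace normalisation
    return " ".join(pred.split()) == " ".join(gold.split())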
3
2023-10-17 19:57:37+00:00
24k
nchen909/Pass-Tuning
evaluator/CodeBLEU/syntax_match.py
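A minimal driver sketch for the DFG extractors quoted in the context that follows. The DFG_* signatures (root_node, index_to_code, states) are verbatim from the snippets; the tree-sitter setup and the helpers tree_to_token_index / index_to_code_token are assumed to live in the same parser utilities as the tree_to_variable_index the snippets call:

from tree_sitter import Language, Parser
from evaluator.CodeBLEU.parser import DFG_python, tree_to_token_index, index_to_code_token  # helper re-exports assumed

PY_LANGUAGE = Language('parser/my-languages.so', 'python')  # assumed prebuilt grammar bundle
parser = Parser()
parser.set_language(PY_LANGUAGE)

code = "a = 1\nb = a + 2\n"
root = parser.parse(bytes(code, 'utf8')).root_node
tokens_index = tree_to_token_index(root)
lines = code.split('\n')
index_to_code = {pos: (i, index_to_code_token(pos, lines))
                 for i, pos in enumerate(tokens_index)}
dfg, _ = DFG_python(root, index_to_code, {})
# dfg: sorted list of (token, idx, 'comesFrom'|'computedFrom', dep_tokens, dep_idxs) edges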
[ { "identifier": "DFG_python", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_python(root_node,index_to_code,states):\n assignment=['assignment','augmented_assignment','for_in_clause']\n if_statement=['if_statement']\n for_statement=['for_statement']\n while_statement=['while_statement']\n do_first_statement=['for_in_clause'] \n def_statement=['default_parameter']\n states=states.copy() \n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment': \n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_python(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in assignment:\n if root_node.type=='for_in_clause':\n right_nodes=[root_node.children[-1]]\n left_nodes=[root_node.child_by_field_name('left')]\n else:\n if root_node.child_by_field_name('right') is None:\n return [],states\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n DFG=[]\n for node in right_nodes:\n temp,states=DFG_python(node,index_to_code,states)\n DFG+=temp\n \n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in ['elif_clause','else_clause']:\n temp,current_states=DFG_python(child,index_to_code,current_states)\n DFG+=temp\n else:\n temp,new_states=DFG_python(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if 
tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for i in range(2):\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n for node in right_nodes:\n temp,states=DFG_python(node,index_to_code,states)\n DFG+=temp\n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n if root_node.children[-1].type==\"block\":\n temp,states=DFG_python(root_node.children[-1],index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_java", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_java(root_node,index_to_code,states):\n assignment=['assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['update_expression']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=['enhanced_for_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if 
root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_java(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_java(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_java(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_java(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"local_variable_declaration\":\n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda 
t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_java(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_java(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_ruby", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_ruby(root_node,index_to_code,states):\n assignment=['assignment','operator_assignment']\n if_statement=['if','elsif','else','unless','when']\n for_statement=['for']\n while_statement=['while_modifier','until']\n do_first_statement=[] \n def_statement=['keyword_parameter']\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n states=states.copy()\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_ruby(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n 
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in assignment:\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n if root_node.type==\"operator_assignment\":\n left_nodes=[root_node.children[0]]\n right_nodes=[root_node.children[-1]]\n\n DFG=[]\n for node in right_nodes:\n temp,states=DFG_ruby(node,index_to_code,states)\n DFG+=temp\n \n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement:\n temp,current_states=DFG_ruby(child,index_to_code,current_states)\n DFG+=temp\n else:\n temp,new_states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for i in range(2):\n left_nodes=[root_node.child_by_field_name('pattern')]\n right_nodes=[root_node.child_by_field_name('value')]\n assert len(right_nodes)==len(left_nodes)\n for node in right_nodes:\n temp,states=DFG_ruby(node,index_to_code,states)\n DFG+=temp\n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n temp,states=DFG_ruby(root_node.child_by_field_name('body'),index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for 
child in root_node.children:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_go", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_go(root_node,index_to_code,states):\n assignment=['assignment_statement',]\n def_statement=['var_spec']\n increment_statement=['inc_statement']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=[]\n while_statement=[]\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_go(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_go(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n 
flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_go(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_go(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"for_clause\":\n if child.child_by_field_name('update') is not None:\n temp,states=DFG_go(child.child_by_field_name('update'),index_to_code,states)\n DFG+=temp \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_php", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_php(root_node,index_to_code,states):\n assignment=['assignment_expression','augmented_assignment_expression']\n def_statement=['simple_parameter']\n increment_statement=['update_expression']\n if_statement=['if_statement','else_clause']\n for_statement=['for_statement']\n enhanced_for_statement=['foreach_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('default_value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n 
temp,states=DFG_php(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_php(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_php(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_php(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"assignment_expression\": \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=None\n value=None\n for child in root_node.children:\n if child.type=='variable_name' and value is None:\n value=child\n elif child.type=='variable_name' and name is None:\n name=child\n break\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_php(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n 
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_php(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_javascript", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_javascript(root_node,index_to_code,states):\n assignment=['assignment_pattern','augmented_assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['update_expression']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=[]\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_javascript(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_javascript(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in 
name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_javascript(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states) \n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"variable_declaration\": \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_csharp", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_csharp(root_node,index_to_code,states):\n assignment=['assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['postfix_unary_expression']\n if_statement=['if_statement','else']\n 
for_statement=['for_statement']\n enhanced_for_statement=['for_each_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n if len(root_node.children)==2:\n name=root_node.children[0]\n value=root_node.children[1]\n else:\n name=root_node.children[0]\n value=None\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_csharp(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_csharp(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_csharp(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n 
temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"local_variable_declaration\":\n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=root_node.child_by_field_name('left')\n value=root_node.child_by_field_name('right')\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_csharp(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_csharp(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_c", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_c(root_node, index_to_code, states):\n assignment = ['assignment_expression']\n def_statement = ['init_declatator', 'pointer_declarator', 'array_declarator']\n increment_statement = ['update_expression']\n if_statement = ['if_statement', 'else']\n for_statement = ['for_statement']\n while_statement = ['while_statement']\n parameter_statement = ['parameter_declaration']\n do_first_statement = []\n states = states.copy()\n if (len(root_node.children) == 0 or root_node.type == 'string') and root_node.type != 'comment':\n idx, code = index_to_code[(root_node.start_point, root_node.end_point)]\n if root_node.type == code or (root_node.parent.type == 'function_declarator' and root_node):\n return [], states\n elif code in states:\n return [(code, idx, 'comesFrom', [code], states[code].copy())], states\n elif root_node.type == 'identifier':\n if root_node.parent.type == 'declaration':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n return [], states\n else:\n return 
[], states\n elif root_node.type in def_statement:\n\n if root_node.parent.type == 'function_definition':\n while root_node.type == 'pointer_declarator' and root_node.child_by_field_name('declarator').type == 'pointer_declarator':\n root_node = root_node.child_by_field_name('declarator')\n DFG = []\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n return sorted(DFG, key=lambda x: x[1]), states\n name = root_node.child_by_field_name('declarator')\n value = root_node.child_by_field_name('value')\n DFG = []\n if value is None:\n indexs = tree_to_variable_index(name, index_to_code)\n for index in indexs:\n idx, code = index_to_code[index]\n DFG.append((code, idx, 'comesFrom', [], []))\n states[code] = [idx]\n return sorted(DFG, key=lambda x: x[1]), states\n else:\n name_indexs = tree_to_variable_index(name, index_to_code)\n value_indexs = tree_to_variable_index(value, index_to_code)\n temp, states = DFG_c(value, index_to_code, states)\n DFG += temp\n for index1 in name_indexs:\n idx1, code1 = index_to_code[index1]\n for index2 in value_indexs:\n idx2, code2 = index_to_code[index2]\n DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))\n states[code1] = [idx1]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in assignment:\n # left_nodes = root_node.child_by_field_name('left')\n # right_nodes = root_node.child_by_field_name('right')\n # DFG = []\n # temp, states = DFG_c(right_nodes, index_to_code, states)\n # DFG += temp\n # # filter field identifiers\n # while left_nodes.type == 'field_expression' or left_nodes.type == 'subscript_expression':\n # left_nodes = left_nodes.child_by_field_name('argument')\n # left_node = left_nodes\n # name_indexs = tree_to_variable_index(left_node, index_to_code)\n # value_indexs = tree_to_variable_index(right_nodes, index_to_code)\n # for index1 in name_indexs:\n # idx1, code1 = index_to_code[index1]\n # for index2 in value_indexs:\n # idx2, code2 = index_to_code[index2]\n # if code1 == \"alarm_timers\":\n # print(12)\n # if code1 in\n # DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))\n # states[code1] = [idx1]\n return [], states\n elif root_node.type in increment_statement:\n DFG = []\n indexs = tree_to_variable_index(root_node, index_to_code)\n for index1 in indexs:\n idx1, code1 = index_to_code[index1]\n for index2 in indexs:\n idx2, code2 = index_to_code[index2]\n DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))\n states[code1] = [idx1]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in if_statement:\n DFG = []\n current_states = states.copy()\n others_states = []\n flag = False\n tag = False\n if 'else' in root_node.type:\n tag = True\n for child in root_node.children:\n if 'else' in child.type:\n tag = True\n if child.type not in if_statement and flag is False:\n temp, current_states = DFG_c(child, index_to_code, current_states)\n DFG += temp\n else:\n flag = True\n temp, new_states = DFG_c(child, index_to_code, states)\n DFG += temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states = {}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key] = dic[key].copy()\n else:\n new_states[key] += dic[key]\n for key in states:\n if key not in new_states:\n new_states[key] = states[key]\n else:\n new_states[key] += states[key]\n for key in new_states:\n new_states[key] = 
sorted(list(set(new_states[key])))\n return sorted(DFG, key=lambda x: x[1]), new_states\n elif root_node.type in for_statement:\n DFG = []\n for child in root_node.children:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n flag = False\n for child in root_node.children:\n if flag:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n elif child.type == \"variable_declaration\":\n flag = True\n dic = {}\n for x in DFG:\n if (x[0], x[1], x[2]) not in dic:\n dic[(x[0], x[1], x[2])] = [x[3], x[4]]\n else:\n dic[(x[0], x[1], x[2])][0] = list(set(dic[(x[0], x[1], x[2])][0] + x[3]))\n dic[(x[0], x[1], x[2])][1] = sorted(list(set(dic[(x[0], x[1], x[2])][1] + x[4])))\n DFG = [(x[0], x[1], x[2], y[0], y[1]) for x, y in sorted(dic.items(), key=lambda t: t[0][1])]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in while_statement:\n DFG = []\n for i in range(2):\n for child in root_node.children:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n dic = {}\n for x in DFG:\n if (x[0], x[1], x[2]) not in dic:\n dic[(x[0], x[1], x[2])] = [x[3], x[4]]\n else:\n dic[(x[0], x[1], x[2])][0] = list(set(dic[(x[0], x[1], x[2])][0] + x[3]))\n dic[(x[0], x[1], x[2])][1] = sorted(list(set(dic[(x[0], x[1], x[2])][1] + x[4])))\n DFG = [(x[0], x[1], x[2], y[0], y[1]) for x, y in sorted(dic.items(), key=lambda t: t[0][1])]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in parameter_statement:\n child = root_node.child_by_field_name('declarator')\n if not child:\n return [], states\n while(child.type != 'identifier'):\n if child.type == 'parenthesized_declarator':\n child = child.children[1]\n else:\n child = child.child_by_field_name('declarator')\n if not child:\n return [], states\n idx,code=index_to_code[(child.start_point,child.end_point)]\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n else:\n DFG = []\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n return sorted(DFG, key=lambda x: x[1]), states" }, { "identifier": "remove_comments_and_docstrings", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def remove_comments_and_docstrings(source, lang):\n if lang in ['python']:\n \"\"\"\n Returns 'source' minus comments and docstrings.\n \"\"\"\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n if start_col > 0:\n out += token_string\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n temp = []\n for x in out.split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)\n elif lang in ['ruby']:\n return source\n else:\n def replacer(match):\n s = match.group(0)\n if s.startswith('/'):\n return \" \" # note: a space and not an empty string\n else:\n return s\n\n pattern = 
re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE\n )\n temp = []\n for x in re.sub(pattern, replacer, source).split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)" }, { "identifier": "tree_to_token_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_token_index(root_node):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n return [(root_node.start_point, root_node.end_point)]\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_token_index(child)\n return code_tokens" }, { "identifier": "index_to_code_token", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def index_to_code_token(index, code):\n start_point = index[0]\n end_point = index[1]\n if start_point[0] == end_point[0]:\n s = code[start_point[0]][start_point[1]:end_point[1]]\n else:\n s = \"\"\n s += code[start_point[0]][start_point[1]:]\n for i in range(start_point[0] + 1, end_point[0]):\n s += code[i]\n s += code[end_point[0]][:end_point[1]]\n return s" }, { "identifier": "tree_to_variable_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_variable_index(root_node, index_to_code):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n index = (root_node.start_point, root_node.end_point)\n _, code = index_to_code[index]\n if root_node.type != code:\n return [(root_node.start_point, root_node.end_point)]\n else:\n return []\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_variable_index(child, index_to_code)\n return code_tokens" } ]
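Every DFG_* snippet above returns the same shape: a list of data-flow edges sorted by token index, plus the updated variable-state dict. Each edge is a 5-tuple (token, token_index, edge_type, source_tokens, source_token_indices), where edge_type is 'comesFrom' for value flow and 'computedFrom' for derived values. As a worked illustration (indices derived by hand from DFG_python's rules above, not taken from this record), the two-line program "x = a" / "y = x" tokenizes to x=0, ==1, a=2, y=3, ==4, x=5 — operator leaves consume indices but emit no edges — and yields:

# Hand-derived DFG_python output for "x = a\ny = x" (illustrative only):
expected_dfg = [
    ('x', 0, 'computedFrom', ['a'], [2]),  # x is assigned from a
    ('a', 2, 'comesFrom',    [],    []),   # first appearance of a
    ('y', 3, 'computedFrom', ['x'], [5]),  # y is assigned from x
    ('x', 5, 'comesFrom',    ['x'], [0]),  # this x reads the x defined at index 0
]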
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp,DFG_c from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from tree_sitter import Language, Parser
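A minimal sketch of how these imports are typically wired together before any DFG_* function is called. The 'my-languages.so' filename and the pre-0.22 tree_sitter API (Language(path, name), parser.set_language) are assumptions carried over from the upstream CodeBLEU scripts, not guaranteed by this record:

from tree_sitter import Language, Parser

# Build a parser for one language from the compiled grammar bundle
# (filename is an assumption; parser_path is defined in the record's code below).
PY_LANGUAGE = Language(parser_path + '/my-languages.so', 'python')
parser = Parser()
parser.set_language(PY_LANGUAGE)

code = "x = a\ny = x"
tree = parser.parse(bytes(code, 'utf8'))
root = tree.root_node

# Map every leaf-token span to (index, token_text); this is the
# index_to_code argument that every DFG_* function expects.
spans = tree_to_token_index(root)
lines = code.split('\n')
index_to_code = {
    span: (i, index_to_code_token(span, lines))
    for i, span in enumerate(spans)
}

dfg, _states = DFG_python(root, index_to_code, {})  # yields the edge list shown above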
17382
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser'

dfg_function = {
    'python': DFG_python,
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser'

dfg_function = {
    'python': DFG_python,
'java': DFG_java,
1
2023-10-20 09:24:44+00:00
24k
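Taken together, the scalar fields above describe one retrieval-augmented completion task: gold_snippet_index 1 points at the second context entry (DFG_java), which is exactly what is needed to continue cropped_code with the next_line "'java': DFG_java,". A hedged sketch of how such a record could be assembled into a prompt/target pair — the template is illustrative; only the field names come from the dataset schema:

def build_example(record):
    # Pick the gold retrieved snippet for this completion step.
    gold = record['context'][record['gold_snippet_index']]
    # Illustrative prompt layout (an assumption, not part of the dataset):
    # retrieved snippet first, then the cropped file prefix to complete.
    prompt = (
        '# Retrieved from ' + gold['path'] + '\n'
        + gold['snippet'] + '\n\n'
        + record['cropped_code'] + '\n'
    )
    return prompt, record['next_line']  # target here: "'java': DFG_java,"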
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES = getattr(settings, 'DJANGO_LEDGER_USE_CLOSING_ENTRIES', False)\nDJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT = getattr(settings,\n 'DJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT', 3600)\nDJANGO_LEDGER_LOGIN_URL = getattr(settings, 'DJANGO_LEDGER_LOGIN_URL', settings.LOGIN_URL)\nDJANGO_LEDGER_BILL_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_INVOICE_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_FORM_INPUT_CLASSES = getattr(settings, 'DJANGO_LEDGER_FORM_INPUT_CLASSES', 'input')\nDJANGO_LEDGER_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_CURRENCY_SYMBOL', '$')\nDJANGO_LEDGER_SPACED_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_SPACED_CURRENCY_SYMBOL', False)\nDJANGO_LEDGER_SHOW_FEEDBACK_BUTTON = getattr(settings, 'DJANGO_LEDGER_SHOW_FEEDBACK_BUTTON', False)\nDJANGO_LEDGER_FEEDBACK_EMAIL_LIST = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_EMAIL_LIST', [])\nDJANGO_LEDGER_FEEDBACK_FROM_EMAIL = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_FROM_EMAIL', None)\nDJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME = getattr(settings, 'DJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME', False)\nDJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE', Decimal('0.02'))\nDJANGO_LEDGER_TRANSACTION_CORRECTION = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_CORRECTION', Decimal('0.01'))\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE', True)\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', 5)\nDJANGO_LEDGER_ACCOUNT_CODE_USE_PREFIX = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', True)\nDJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')\nDJANGO_LEDGER_PO_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PO_NUMBER_PREFIX', 'PO')\nDJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX', 'E')\nDJANGO_LEDGER_INVOICE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_PREFIX', 'I')\nDJANGO_LEDGER_BILL_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_PREFIX', 'B')\nDJANGO_LEDGER_VENDOR_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_VENDOR_NUMBER_PREFIX', 'V')\nDJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')\nDJANGO_LEDGER_EXPENSE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX', 'IEX')\nDJANGO_LEDGER_INVENTORY_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX', 'INV')\nDJANGO_LEDGER_PRODUCT_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX', 'IPR')\nDJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)\nDJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')\nDJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS',\n 'django_ledger.models.bill.BillModelAbstract')\nDJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS',\n 
'django_ledger.models.invoice.InvoiceModelAbstract')\nDJANGO_LEDGER_DEFAULT_COA = getattr(settings, 'DJANGO_LEDGER_DEFAULT_COA', None)\nDJANGO_LEDGER_FINANCIAL_ANALYSIS = {\n 'ratios': {\n 'current_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'quick_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'debt_to_equity': {\n 'good_incremental': False,\n 'ranges': {\n 'healthy': 0,\n 'watch': .25,\n 'warning': .5,\n 'critical': 1\n }\n },\n 'return_on_equity': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .07,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'return_on_assets': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'net_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'gross_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n }\n}" }, { "identifier": "InvalidDateInputError", "path": "django_ledger/exceptions.py", "snippet": "class InvalidDateInputError(ValidationError):\n pass" }, { "identifier": "TransactionNotInBalanceError", "path": "django_ledger/exceptions.py", "snippet": "class TransactionNotInBalanceError(ValidationError):\n pass" }, { "identifier": "roles", "path": "django_ledger/io/roles.py", "snippet": "DEBIT = 'debit'\nCREDIT = 'credit'\nASSET_CA_CASH = 'asset_ca_cash'\nASSET_CA_MKT_SECURITIES = 'asset_ca_mkt_sec'\nASSET_CA_RECEIVABLES = 'asset_ca_recv'\nASSET_CA_INVENTORY = 'asset_ca_inv'\nASSET_CA_UNCOLLECTIBLES = 'asset_ca_uncoll'\nASSET_CA_PREPAID = 'asset_ca_prepaid'\nASSET_CA_OTHER = 'asset_ca_other'\nASSET_LTI_NOTES_RECEIVABLE = 'asset_lti_notes'\nASSET_LTI_LAND = 'asset_lti_land'\nASSET_LTI_SECURITIES = 'asset_lti_sec'\nASSET_PPE_BUILDINGS = 'asset_ppe_build'\nASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION = 'asset_ppe_build_accum_depr'\nASSET_PPE_EQUIPMENT = 'asset_ppe_equip'\nASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION = 'asset_ppe_equip_accum_depr'\nASSET_PPE_PLANT = 'asset_ppe_plant'\nASSET_PPE_PLANT_ACCUM_DEPRECIATION = 'asset_ppe_plant_depr'\nASSET_INTANGIBLE_ASSETS = 'asset_ia'\nASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION = 'asset_ia_accum_amort'\nASSET_ADJUSTMENTS = 'asset_adjustment'\nLIABILITY_CL_ACC_PAYABLE = 'lia_cl_acc_payable'\nLIABILITY_CL_WAGES_PAYABLE = 'lia_cl_wages_payable'\nLIABILITY_CL_TAXES_PAYABLE = 'lia_cl_taxes_payable'\nLIABILITY_CL_INTEREST_PAYABLE = 'lia_cl_int_payable'\nLIABILITY_CL_ST_NOTES_PAYABLE = 'lia_cl_st_notes_payable'\nLIABILITY_CL_LTD_MATURITIES = 'lia_cl_ltd_mat'\nLIABILITY_CL_DEFERRED_REVENUE = 'lia_cl_def_rev'\nLIABILITY_CL_OTHER = 'lia_cl_other'\nLIABILITY_LTL_NOTES_PAYABLE = 'lia_ltl_notes'\nLIABILITY_LTL_BONDS_PAYABLE = 'lia_ltl_bonds'\nLIABILITY_LTL_MORTGAGE_PAYABLE = 'lia_ltl_mortgage'\nEQUITY_CAPITAL = 'eq_capital'\nEQUITY_ADJUSTMENT = 'eq_adjustment'\nEQUITY_COMMON_STOCK = 'eq_stock_common'\nEQUITY_PREFERRED_STOCK = 'eq_stock_preferred'\nEQUITY_DIVIDENDS = 'eq_dividends'\nINCOME_OPERATIONAL = 'in_operational'\nINCOME_PASSIVE = 'in_passive'\nINCOME_CAPITAL_GAIN_LOSS = 'in_gain_loss'\nINCOME_INTEREST = 'in_interest'\nINCOME_OTHER = 'in_other'\nCOGS = 'cogs_regular'\nEXPENSE_OPERATIONAL = 'ex_regular'\nEXPENSE_CAPITAL = 'ex_capital'\nEXPENSE_DEPRECIATION = 'ex_depreciation'\nEXPENSE_AMORTIZATION = 
'ex_amortization'\nEXPENSE_TAXES = 'ex_taxes'\nEXPENSE_INTEREST_ST = 'ex_interest_st'\nEXPENSE_INTEREST_LT = 'ex_interest'\nEXPENSE_OTHER = 'ex_other'\nROOT_COA = 'root_coa'\nROOT_ASSETS = 'root_assets'\nROOT_LIABILITIES = 'root_liabilities'\nROOT_CAPITAL = 'root_capital'\nROOT_INCOME = 'root_income'\nROOT_COGS = 'root_cogs'\nROOT_EXPENSES = 'root_expenses'\nROOT_GROUP = [\n ROOT_COA,\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_LEVEL_2 = [\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_META = {\n ROOT_COA: {\n 'code': '00000',\n 'title': 'CoA Root Node',\n 'balance_type': DEBIT\n },\n ROOT_ASSETS: {\n 'code': '01000',\n 'title': 'Asset Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_LIABILITIES: {\n 'code': '02000',\n 'title': 'Liability Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_CAPITAL: {\n 'code': '03000',\n 'title': 'Capital Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_INCOME: {\n 'code': '04000',\n 'title': 'Income Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_COGS: {\n 'code': '05000',\n 'title': 'COGS Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_EXPENSES: {\n 'code': '06000',\n 'title': 'Expense Accounts Root Node',\n 'balance_type': DEBIT\n },\n}\nGROUP_QUICK_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES\n]\nGROUP_CURRENT_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES,\n ASSET_CA_INVENTORY,\n ASSET_CA_RECEIVABLES,\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_NON_CURRENT_ASSETS = [\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_LAND,\n ASSET_LTI_SECURITIES,\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION,\n ASSET_INTANGIBLE_ASSETS,\n ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION,\n ASSET_ADJUSTMENTS\n]\nGROUP_ASSETS = GROUP_CURRENT_ASSETS + GROUP_NON_CURRENT_ASSETS\nGROUP_CURRENT_LIABILITIES = [\n LIABILITY_CL_ACC_PAYABLE,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_OTHER,\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE\n]\nGROUP_LT_LIABILITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n]\nGROUP_LIABILITIES = GROUP_CURRENT_LIABILITIES + GROUP_LT_LIABILITIES\nGROUP_CAPITAL = [\n EQUITY_CAPITAL,\n EQUITY_COMMON_STOCK,\n EQUITY_PREFERRED_STOCK,\n EQUITY_DIVIDENDS,\n EQUITY_ADJUSTMENT\n]\nGROUP_INCOME = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_COGS = [\n COGS\n]\nGROUP_EXPENSES = [\n EXPENSE_OPERATIONAL,\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_NET_PROFIT = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER,\n COGS\n]\nGROUP_GROSS_PROFIT = [\n INCOME_OPERATIONAL,\n COGS\n]\nGROUP_NET_SALES = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE\n]\nGROUP_PPE_ACCUM_DEPRECIATION = [\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION\n]\nGROUP_EXPENSE_DEP_AND_AMT = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_EARNINGS = GROUP_INCOME + GROUP_COGS + 
GROUP_EXPENSES\nGROUP_EQUITY = GROUP_CAPITAL + GROUP_EARNINGS\nGROUP_LIABILITIES_EQUITY = GROUP_LIABILITIES + GROUP_EQUITY\nGROUP_INVOICE = [ASSET_CA_CASH, ASSET_CA_RECEIVABLES, LIABILITY_CL_DEFERRED_REVENUE]\nGROUP_BILL = [ASSET_CA_CASH, ASSET_CA_PREPAID, LIABILITY_CL_ACC_PAYABLE]\nGROUP_IC_OPERATING_REVENUES = [INCOME_OPERATIONAL]\nGROUP_IC_OPERATING_COGS = [COGS]\nGROUP_IC_OPERATING_EXPENSES = [EXPENSE_OPERATIONAL]\nGROUP_IC_OTHER_REVENUES = [\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_IC_OTHER_EXPENSES = [\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_CFS_NET_INCOME = GROUP_EARNINGS\nGROUP_CFS_OP_DEPRECIATION_AMORTIZATION = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_CFS_OP_INVESTMENT_GAINS = [\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_OP_ACCOUNTS_RECEIVABLE = [\n ASSET_CA_RECEIVABLES\n]\nGROUP_CFS_OP_INVENTORY = [\n ASSET_CA_INVENTORY\n]\nGROUP_CFS_OP_ACCOUNTS_PAYABLE = [\n LIABILITY_CL_ACC_PAYABLE\n]\nGROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT = [\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT = [\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_OTHER,\n]\nGROUP_CFS_OPERATING = list(chain.from_iterable([\n GROUP_CFS_NET_INCOME,\n GROUP_CFS_OP_DEPRECIATION_AMORTIZATION,\n GROUP_CFS_OP_INVESTMENT_GAINS,\n GROUP_CFS_OP_ACCOUNTS_RECEIVABLE,\n GROUP_CFS_OP_INVENTORY,\n GROUP_CFS_OP_ACCOUNTS_PAYABLE,\n GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT,\n GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT\n]))\nGROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]\nGROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]\nGROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]\nGROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]\nGROUP_CFS_FINANCING = GROUP_CFS_FIN_ISSUING_EQUITY + GROUP_CFS_FIN_DIVIDENDS\nGROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE = [\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_PLANT,\n ASSET_PPE_EQUIPMENT,\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_INV_LTD_OF_PPE = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n]\nGROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE\nGROUP_CFS_INV_PURCHASE_OF_SECURITIES = [\n ASSET_CA_MKT_SECURITIES,\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_SECURITIES,\n INCOME_INTEREST,\n INCOME_PASSIVE,\n]\nGROUP_CFS_INV_LTD_OF_SECURITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE\n]\nGROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES\nGROUP_CFS_INVESTING = GROUP_CFS_INVESTING_PPE + GROUP_CFS_INVESTING_SECURITIES\nGROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING\nBS_ASSET_ROLE = 'assets'\nBS_LIABILITIES_ROLE = 'liabilities'\nBS_EQUITY_ROLE = 'equity'\nACCOUNT_ROLE_CHOICES = [\n (BS_ASSET_ROLE.capitalize(), (\n # CURRENT ASSETS ----\n (ASSET_CA_CASH, _('Current Asset')),\n (ASSET_CA_MKT_SECURITIES, _('Marketable Securities')),\n (ASSET_CA_RECEIVABLES, _('Receivables')),\n (ASSET_CA_INVENTORY, _('Inventory')),\n 
(ASSET_CA_UNCOLLECTIBLES, _('Uncollectibles')),\n (ASSET_CA_PREPAID, _('Prepaid')),\n (ASSET_CA_OTHER, _('Other Liquid Assets')),\n\n # LONG TERM INVESTMENTS ---\n (ASSET_LTI_NOTES_RECEIVABLE, _('Notes Receivable')),\n (ASSET_LTI_LAND, _('Land')),\n (ASSET_LTI_SECURITIES, _('Securities')),\n\n # PPE ...\n (ASSET_PPE_BUILDINGS, _('Buildings')),\n (ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION, _('Buildings - Accum. Depreciation')),\n (ASSET_PPE_PLANT, _('Plant')),\n (ASSET_PPE_PLANT_ACCUM_DEPRECIATION, _('Plant - Accum. Depreciation')),\n (ASSET_PPE_EQUIPMENT, _('Equipment')),\n (ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION, _('Equipment - Accum. Depreciation')),\n\n # Other Assets ...\n (ASSET_INTANGIBLE_ASSETS, _('Intangible Assets')),\n (ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION, _('Intangible Assets - Accum. Amortization')),\n (ASSET_ADJUSTMENTS, _('Other Assets')),\n )),\n (BS_LIABILITIES_ROLE.capitalize(), (\n\n # CURRENT LIABILITIES ---\n (LIABILITY_CL_ACC_PAYABLE, _('Accounts Payable')),\n (LIABILITY_CL_WAGES_PAYABLE, _('Wages Payable')),\n (LIABILITY_CL_INTEREST_PAYABLE, _('Interest Payable')),\n (LIABILITY_CL_TAXES_PAYABLE, _('Taxes Payable')),\n (LIABILITY_CL_ST_NOTES_PAYABLE, _('Short Term Notes Payable')),\n (LIABILITY_CL_LTD_MATURITIES, _('Current Maturities of Long Term Debt')),\n (LIABILITY_CL_DEFERRED_REVENUE, _('Deferred Revenue')),\n (LIABILITY_CL_OTHER, _('Other Liabilities')),\n\n # LONG TERM LIABILITIES ----\n (LIABILITY_LTL_NOTES_PAYABLE, _('Long Term Notes Payable')),\n (LIABILITY_LTL_BONDS_PAYABLE, _('Bonds Payable')),\n (LIABILITY_LTL_MORTGAGE_PAYABLE, _('Mortgage Payable')),\n )),\n (BS_EQUITY_ROLE.capitalize(), (\n\n # EQUITY ---\n (EQUITY_CAPITAL, _('Capital')),\n (EQUITY_COMMON_STOCK, _('Common Stock')),\n (EQUITY_PREFERRED_STOCK, _('Preferred Stock')),\n (EQUITY_ADJUSTMENT, _('Other Equity Adjustments')),\n (EQUITY_DIVIDENDS, _('Dividends & Distributions to Shareholders')),\n\n # INCOME ---\n (INCOME_OPERATIONAL, _('Operational Income')),\n (INCOME_PASSIVE, _('Investing/Passive Income')),\n (INCOME_INTEREST, _('Interest Income')),\n (INCOME_CAPITAL_GAIN_LOSS, _('Capital Gain/Loss Income')),\n (INCOME_OTHER, _('Other Income')),\n\n # COGS ----\n (COGS, _('Cost of Goods Sold')),\n\n # EXPENSES ----\n (EXPENSE_OPERATIONAL, _('Regular Expense')),\n (EXPENSE_INTEREST_ST, _('Interest Expense - Short Term Debt')),\n (EXPENSE_INTEREST_LT, _('Interest Expense - Long Term Debt')),\n (EXPENSE_TAXES, _('Tax Expense')),\n (EXPENSE_CAPITAL, _('Capital Expense')),\n (EXPENSE_DEPRECIATION, _('Depreciation Expense')),\n (EXPENSE_AMORTIZATION, _('Amortization Expense')),\n (EXPENSE_OTHER, _('Other Expense')),\n )),\n ('Root', (\n (ROOT_COA, 'CoA Root Account'),\n (ROOT_ASSETS, 'Assets Root Account'),\n (ROOT_LIABILITIES, 'Liabilities Root Account'),\n (ROOT_CAPITAL, 'Capital Root Account'),\n (ROOT_INCOME, 'Income Root Account'),\n (ROOT_COGS, 'COGS Root Account'),\n (ROOT_EXPENSES, 'Expenses Root Account'),\n ))\n]\nACCOUNT_CHOICES_NO_ROOT = [c for c in ACCOUNT_ROLE_CHOICES if c[0] != 'Root']\nROLES_ORDER_ASSETS = [a[0] for a in ACCOUNT_ROLE_CHOICES[0][1]]\nROLES_ORDER_LIABILITIES = [a[0] for a in ACCOUNT_ROLE_CHOICES[1][1]]\nROLES_ORDER_CAPITAL = [a[0] for a in ACCOUNT_ROLE_CHOICES[2][1]]\nROLES_ORDER_ALL = list(chain.from_iterable([ROLES_ORDER_ASSETS, ROLES_ORDER_LIABILITIES, ROLES_ORDER_CAPITAL]))\nACCOUNT_LIST_ROLE_ORDER = list(r[0] for r in chain.from_iterable([i[1] for i in ACCOUNT_CHOICES_NO_ROOT]))\nACCOUNT_LIST_ROLE_VERBOSE = {r[0]: r[1] for r in chain.from_iterable([i[1] for i 
in ACCOUNT_CHOICES_NO_ROOT])}\nROLE_TUPLES = sum([[(r[0].lower(), s[0]) for s in r[1]] for r in ACCOUNT_ROLE_CHOICES], list())\nROLE_DICT = dict([(t[0].lower(), [r[0] for r in t[1]]) for t in ACCOUNT_ROLE_CHOICES])\nVALID_ROLES = [r[1] for r in ROLE_TUPLES]\nBS_ROLES = dict((r[1], r[0]) for r in ROLE_TUPLES)\nBS_BUCKETS = {\n '0': 'Root',\n '1': 'Asset',\n '2': 'Liability',\n '3': 'Capital',\n '4': 'Income',\n '5': 'COGS',\n '6': 'Expenses'\n}\nBS_BUCKETS_ORDER = [v for _, v in BS_BUCKETS.items() if v != 'Root']\nROLES_VARS = locals().keys()\nROLES_DIRECTORY = dict()\nROLES_CATEGORIES = ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'COGS', 'EXPENSE']\nROLES_GROUPS = [g for g in ROLES_VARS if g.split('_')[0] == 'GROUP']\nGROUPS_DIRECTORY = dict()\ndef validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:" }, { "identifier": "RoleContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class RoleContextManager:\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.DIGEST = io_data\n self.DIGEST['role_account'] = None\n self.DIGEST['role_balance'] = None\n\n self.ACCOUNTS = io_data['accounts']\n\n self.ROLES_ACCOUNTS = dict()\n self.ROLES_BALANCES = dict()\n self.ROLES_BALANCE_SHEET = dict()\n\n if self.BY_PERIOD:\n self.ROLES_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_period'] = None\n if self.BY_UNIT:\n self.ROLES_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_unit'] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_roles()\n self.DIGEST['role_account'] = self.ROLES_ACCOUNTS\n self.DIGEST['role_balance'] = self.ROLES_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['role_balance_by_period'] = self.ROLES_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['role_balance_by_unit'] = self.ROLES_BALANCES_BY_UNIT\n\n return self.DIGEST\n\n def process_roles(self):\n\n for c, l in roles_module.ROLES_DIRECTORY.items():\n for r in l:\n acc_list = list(acc for acc in self.ACCOUNTS if acc['role'] == getattr(roles_module, r))\n\n self.ROLES_ACCOUNTS[r] = acc_list\n self.ROLES_BALANCES[r] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ROLES_BALANCES_BY_PERIOD[key][r] = sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ROLES_BALANCES_BY_UNIT[key][r] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "GroupContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class GroupContextManager:\n GROUP_ACCOUNTS_KEY = 'group_account'\n GROUP_BALANCE_KEY = 'group_balance'\n GROUP_BALANCE_BY_UNIT_KEY = 'group_balance_by_unit'\n GROUP_BALANCE_BY_PERIOD_KEY = 'group_balance_by_period'\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.IO_DIGEST = io_data\n\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = None\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = None\n\n self.DIGEST_ACCOUNTS = io_data['accounts']\n\n self.GROUPS_ACCOUNTS = dict()\n self.GROUPS_BALANCES = dict()\n\n if 
self.BY_PERIOD:\n self.GROUPS_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n if self.BY_UNIT:\n self.GROUPS_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.GROUPS_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n def digest(self):\n\n self.process_groups()\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = self.GROUPS_ACCOUNTS\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = self.GROUPS_BALANCES\n\n if self.BY_PERIOD:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = self.GROUPS_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = self.GROUPS_BALANCES_BY_UNIT\n return self.IO_DIGEST\n\n def get_accounts_generator(self, mod, g):\n return (acc for acc in self.DIGEST_ACCOUNTS if acc['role'] in getattr(mod, g))\n\n def process_groups(self):\n for g in roles_module.ROLES_GROUPS:\n acc_list = list(self.get_accounts_generator(roles_module, g))\n self.GROUPS_ACCOUNTS[g] = acc_list\n self.GROUPS_BALANCES[g] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.GROUPS_BALANCES_BY_PERIOD[key][g] = sum(\n acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.GROUPS_BALANCES_BY_UNIT[key][g] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0]\n )" }, { "identifier": "ActivityContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class ActivityContextManager:\n\n def __init__(self,\n io_data: dict,\n by_unit: bool = False,\n by_period: bool = False):\n\n self.DIGEST = io_data\n self.DIGEST['activity_account'] = None\n self.DIGEST['activity_balance'] = None\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.ACCOUNTS = io_data['accounts']\n self.ACTIVITY_ACCOUNTS = dict()\n self.ACTIVITY_BALANCES = dict()\n\n if self.BY_PERIOD:\n self.ACTIVITY_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_period'] = None\n if self.BY_UNIT:\n self.ACTIVITY_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_unit'] = None\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_activity()\n self.DIGEST['activity_account'] = self.ACTIVITY_ACCOUNTS\n self.DIGEST['activity_balance'] = self.ACTIVITY_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['activity_balance_by_period'] = self.ACTIVITY_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['activity_balance_by_unit'] = self.ACTIVITY_BALANCES_BY_UNIT\n\n def get_accounts_generator(self, activity: str):\n return (acc for acc in self.ACCOUNTS if acc['activity'] == activity)\n\n def process_activity(self):\n JournalEntryModel = lazy_importer.get_journal_entry_model()\n for act in JournalEntryModel.VALID_ACTIVITIES:\n acc_list = list(self.get_accounts_generator(act))\n self.ACTIVITY_ACCOUNTS[act] = acc_list\n self.ACTIVITY_BALANCES[act] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ACTIVITY_BALANCES_BY_PERIOD[key][act] = 
sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ACTIVITY_BALANCES_BY_UNIT[key][act] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "BalanceSheetStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class BalanceSheetStatementContextManager:\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n gb_bs = {\n bsr: list(l) for bsr, l in groupby(\n chain.from_iterable(\n [\n self.DIGEST['group_account']['GROUP_ASSETS'],\n self.DIGEST['group_account']['GROUP_LIABILITIES'],\n self.DIGEST['group_account']['GROUP_CAPITAL'],\n ]\n ),\n key=lambda acc: acc['role_bs'])\n }\n\n bs_context = {\n bs_role: {\n 'total_balance': sum(a['balance'] for a in gb),\n 'is_block': True,\n 'roles': {\n r: {\n 'accounts': list(a)\n } for r, a in groupby(list(gb), key=lambda acc: acc['role'])\n }\n } for bs_role, gb in gb_bs.items()\n }\n\n for bs_role, bs_role_data in bs_context.items():\n for acc_role, role_data in bs_role_data['roles'].items():\n role_data['total_balance'] = sum(a['balance'] for a in role_data['accounts'])\n role_data['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc_role]\n\n bs_context['equity_balance'] = self.DIGEST['group_balance']['GROUP_EQUITY']\n bs_context['retained_earnings_balance'] = self.DIGEST['group_balance']['GROUP_EARNINGS']\n bs_context['liabilities_equity_balance'] = self.DIGEST['group_balance']['GROUP_LIABILITIES_EQUITY']\n\n self.DIGEST['balance_sheet'] = bs_context\n\n return self.DIGEST" }, { "identifier": "IncomeStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class IncomeStatementContextManager:\n\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n self.DIGEST['income_statement'] = {\n 'operating': {\n 'revenues': [\n acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_REVENUES\n ],\n 'cogs': [\n acc for acc in self.DIGEST['group_account']['GROUP_COGS'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_COGS\n ],\n 'expenses': [\n acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_EXPENSES\n ]\n },\n 'other': {\n 'revenues': [acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_REVENUES],\n 'expenses': [acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_EXPENSES],\n }\n }\n\n for activity, ic_section in self.DIGEST['income_statement'].items():\n for section, acc_list in ic_section.items():\n for acc in acc_list:\n acc['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc['role']]\n\n # OPERATING INCOME...\n self.DIGEST['income_statement']['operating']['gross_profit'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs']\n ]\n ))\n self.DIGEST['income_statement']['operating']['net_operating_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs'],\n self.DIGEST['income_statement']['operating']['expenses'],\n ]\n 
))\n self.DIGEST['income_statement']['operating']['net_operating_revenue'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['revenues']\n )\n self.DIGEST['income_statement']['operating']['net_cogs'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['cogs']\n )\n self.DIGEST['income_statement']['operating']['net_operating_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['expenses']\n )\n\n # OTHER INCOME....\n self.DIGEST['income_statement']['other']['net_other_revenues'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['revenues']\n )\n self.DIGEST['income_statement']['other']['net_other_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['expenses']\n )\n self.DIGEST['income_statement']['other']['net_other_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['other']['revenues'],\n self.DIGEST['income_statement']['other']['expenses']\n ]\n ))\n\n # NET INCOME...\n self.DIGEST['income_statement']['net_income'] = self.DIGEST['income_statement']['operating'][\n 'net_operating_income']\n self.DIGEST['income_statement']['net_income'] += self.DIGEST['income_statement']['other'][\n 'net_other_income']\n return self.DIGEST" }, { "identifier": "CashFlowStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class CashFlowStatementContextManager:\n CFS_DIGEST_KEY = 'cash_flow_statement'\n\n # todo: implement by period and by unit...\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n self.IO_DIGEST = io_data\n self.CASH_ACCOUNTS = [a for a in self.IO_DIGEST['accounts'] if a['role'] == roles_module.ASSET_CA_CASH]\n self.JE_MODEL = lazy_loader.get_journal_entry_model()\n\n def check_io_digest(self):\n if GroupContextManager.GROUP_BALANCE_KEY not in self.IO_DIGEST:\n raise ValidationError(\n 'IO Digest must have groups for Cash Flow Statement'\n )\n\n def operating(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n operating_activities = dict()\n operating_activities['GROUP_CFS_NET_INCOME'] = {\n 'description': 'Net Income',\n 'balance': group_balances['GROUP_CFS_NET_INCOME']\n }\n operating_activities['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION'] = {\n 'description': 'Depreciation & Amortization of Assets',\n 'balance': -group_balances['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION']\n }\n operating_activities['GROUP_CFS_OP_INVESTMENT_GAINS'] = {\n 'description': 'Gain/Loss Sale of Assets',\n 'balance': group_balances['GROUP_CFS_OP_INVESTMENT_GAINS']\n }\n operating_activities['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE'] = {\n 'description': 'Accounts Receivable',\n 'balance': -group_balances['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE']\n }\n operating_activities['GROUP_CFS_OP_INVENTORY'] = {\n 'description': 'Inventories',\n 'balance': -group_balances['GROUP_CFS_OP_INVENTORY']\n }\n\n operating_activities['GROUP_CFS_OP_ACCOUNTS_PAYABLE'] = {\n 'description': 'Accounts Payable',\n 'balance': group_balances['GROUP_CFS_OP_ACCOUNTS_PAYABLE']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT'] = {\n 'description': 'Other Current Assets',\n 'balance': -group_balances['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT'] = {\n 'description': 'Other Current Liabilities',\n 'balance': 
group_balances['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT']\n }\n\n net_cash_by_op_activities = sum(i['balance'] for g, i in operating_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['operating'] = operating_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'] = dict(\n OPERATING=net_cash_by_op_activities\n )\n\n def financing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n financing_activities = dict()\n financing_activities['GROUP_CFS_FIN_ISSUING_EQUITY'] = {\n 'description': 'Common Stock, Preferred Stock and Capital Raised',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_EQUITY)\n }\n financing_activities['GROUP_CFS_FIN_DIVIDENDS'] = {\n 'description': 'Dividends Paid Out to Shareholders',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_DIVIDENDS)\n }\n financing_activities['GROUP_CFS_FIN_ST_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Short-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_STD)\n }\n financing_activities['GROUP_CFS_FIN_LT_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Long-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_LTD)\n }\n\n net_cash = sum(i['balance'] for g, i in financing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['financing'] = financing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['FINANCING'] = net_cash\n\n def investing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n investing_activities = dict()\n investing_activities['GROUP_CFS_INVESTING_SECURITIES'] = {\n 'description': 'Purchase, Maturity and Sales of Investments & Securities',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_SECURITIES)\n }\n investing_activities['GROUP_CFS_INVESTING_PPE'] = {\n 'description': 'Addition and Disposition of Property, Plant & Equipment',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_PPE)\n }\n\n net_cash = sum(i['balance'] for g, i in investing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['investing'] = investing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['INVESTING'] = net_cash\n\n def net_cash(self):\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash'] = sum([\n bal for act, bal in self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'].items()\n ])\n\n def digest(self):\n self.check_io_digest()\n self.operating()\n self.financing()\n self.investing()\n self.net_cash()\n return self.IO_DIGEST" }, { "identifier": "IODigestContextManager", "path": "django_ledger/io/io_digest.py", "snippet": "class IODigestContextManager:\n\n def __init__(self, io_data: defaultdict):\n self.IO_DATA: defaultdict = io_data\n self.IO_MODEL = self.IO_DATA['io_model']\n self.TXS_QS = self.IO_DATA['txs_qs']\n self.STRFTIME_FORMAT = '%B %d, %Y'\n\n def get_io_data(self) -> defaultdict:\n return self.IO_DATA\n\n def get_strftime_format(self):\n return self.STRFTIME_FORMAT\n\n def get_from_date(self, as_str: bool = False, fmt=None) -> Optional[date]:\n from_date = self.IO_DATA['from_date']\n if from_date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return from_date.strftime(fmt)\n return 
from_date\n\n def get_to_date(self, as_str: bool = False, fmt=None) -> date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return self.IO_DATA['to_date'].strftime(fmt)\n return self.IO_DATA['to_date']\n\n def is_entity_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_entity_model()\n )\n\n def is_ledger_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_ledger_model()\n )\n\n def is_unit_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_unit_model()\n )\n\n def is_by_unit(self) -> bool:\n return self.IO_DATA['by_unit']\n\n def is_by_period(self) -> bool:\n return self.IO_DATA['by_period']\n\n def is_by_activity(self) -> bool:\n return self.IO_DATA['by_activity']\n\n # Balance Sheet Data...\n def has_balance_sheet(self) -> bool:\n return 'balance_sheet' in self.IO_DATA\n\n def get_balance_sheet_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['balance_sheet']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have balance sheet information available.'\n )\n\n # Income Statement Data...\n def has_income_statement(self) -> bool:\n return 'income_statement' in self.IO_DATA\n\n def get_income_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['income_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have income statement information available.'\n )\n\n # Cash Flow Statement Data...\n def has_cash_flow_statement(self):\n return 'cash_flow_statement' in self.IO_DATA\n\n def get_cash_flow_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['cash_flow_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have cash flow statement information available.'\n )\n\n # CLOSING ENTRIES...\n\n def get_closing_entry_data(self):\n io_data = self.get_io_data()\n return io_data['accounts']" }, { "identifier": "FinancialRatioManager", "path": "django_ledger/io/ratios.py", "snippet": "class FinancialRatioManager:\n\n def __init__(self, io_data):\n self.DIGEST = io_data\n self.ACCOUNTS = io_data['accounts']\n self.RATIO_NA = RATIO_NA\n\n self.quick_assets = io_data['group_balance']['GROUP_QUICK_ASSETS']\n self.assets = io_data['group_balance']['GROUP_ASSETS']\n self.current_liabilities = io_data['group_balance']['GROUP_CURRENT_LIABILITIES']\n self.current_assets = io_data['group_balance']['GROUP_CURRENT_ASSETS']\n self.equity = io_data['group_balance']['GROUP_CAPITAL']\n self.liabilities = io_data['group_balance']['GROUP_LIABILITIES']\n self.net_income = io_data['group_balance']['GROUP_EARNINGS']\n self.net_sales = io_data['group_balance']['GROUP_NET_SALES']\n self.net_profit = io_data['group_balance']['GROUP_NET_PROFIT']\n self.gross_profit = io_data['group_balance']['GROUP_GROSS_PROFIT']\n self.RATIOS = dict()\n\n def digest(self):\n self.quick_ratio()\n self.current_ratio()\n self.debt_to_equity()\n self.return_on_equity()\n self.return_on_assets()\n self.net_profit_margin()\n self.gross_profit_margin()\n self.DIGEST['ratios'] = self.RATIOS\n return self.DIGEST\n\n # ------> SOLVENCY RATIOS <------\n def quick_ratio(self, as_percent=False):\n if self.current_liabilities == 0:\n cr = self.RATIO_NA\n else:\n cr = self.quick_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['quick_ratio'] = cr\n\n def current_ratio(self, 
as_percent=False):\n if self.current_liabilities == 0:\n cr = RATIO_NA\n else:\n cr = self.current_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['current_ratio'] = cr\n\n # ------> LEVERAGE RATIOS <------\n def debt_to_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.liabilities / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['debt_to_equity'] = cr\n\n # ------> PROFITABILITY RATIOS <------\n def return_on_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_equity'] = cr\n\n def return_on_assets(self, as_percent=False):\n if self.assets == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.assets\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_assets'] = cr\n\n def net_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n npm = RATIO_NA\n else:\n npm = self.net_profit / self.net_sales\n if as_percent:\n npm = npm * 100\n self.RATIOS['net_profit_margin'] = npm\n\n def gross_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n gpm = RATIO_NA\n else:\n gpm = self.gross_profit / self.net_sales\n if as_percent:\n gpm = gpm * 100\n self.RATIOS['gross_profit_margin'] = gpm" }, { "identifier": "lazy_loader", "path": "django_ledger/models/utils.py", "snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):" } ]
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
15,722
for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]):
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only: role = roles_module.GROUP_EARNINGS txs_queryset = self.database_digest( user_model=user_model, txs_queryset=txs_queryset, to_date=to_date, from_date=from_date, entity_slug=entity_slug, unit_slug=unit_slug, activity=activity, role=role, accounts=accounts, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, by_period=by_period, **kwargs) for tx_model in txs_queryset: if tx_model['account__balance_type'] != tx_model['tx_type']: tx_model['balance'] = -tx_model['balance'] # txs_list = list(txs_queryset) # txs_list.sort(key=lambda a: ( # a['account__uuid'], # str(a.get('journal_entry__entity_unit__uuid', '')) if by_unit else '', # a['dt_idx'].year if by_period else 0, # a['dt_idx'].month if by_period else 0, # str(a['journal_entry__activity']) if by_activity else None, # 
a['tx_type'] if by_tx_type else '', # )) accounts_gb_code = groupby(txs_queryset, key=lambda a: ( a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles: roles_mgr = RoleContextManager( io_data=io_data, by_period=by_period, by_unit=by_unit ) # idea: change digest() name to something else? maybe aggregate, calculate?... io_data = roles_mgr.digest() if any([ process_groups, balance_sheet_statement, income_statement, cash_flow_statement ]):
group_mgr = GroupContextManager(
5
2023-10-20 01:07:20+00:00
24k
acolas1/KGSimple
simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.batch_size = batch_size\n self.reduce = reduce\n self.log = log\n self.laplace_smooth = laplace_smooth\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n self.scorer = LMScorer.from_pretrained(\"gpt2\", device=self.device, batch_size=batch_size)\n self.idf_df = pd.read_csv(prob_dict_path, ',', encoding='utf-8')\n self.freq_dict = pd.Series((self.idf_df.frequency.values), index=self.idf_df.token).to_dict()\n self.num_tokens = self.idf_df.total.values[0] \n \n def unigram_score(self, sentences):\n if self.freq_dict is None:\n raise Exception(\"Probability dictionary is not defined.\") \n unigram_scores = []\n for sent in sentences:\n unigram_prob = 1\n for token in word_tokenize(sent.lower()):\n if token in self.freq_dict:\n if self.laplace_smooth:\n curr_unigram_prob = (self.freq_dict[token]+1)/(self.num_tokens+len(self.freq_dict))\n else:\n curr_unigram_prob = self.freq_dict[token]/self.num_tokens\n \n \n\n else:\n if self.laplace_smooth:\n curr_unigram_prob = (1/(self.num_tokens+len(self.freq_dict)))\n else:\n curr_unigram_prob = 1\n # unigram_prob += curr_unigram_prob\n \n \n if self.log:\n unigram_prob +=np.log(curr_unigram_prob)\n else:\n unigram_prob *= curr_unigram_prob\n uni_score = unigram_prob/len(word_tokenize(sent))\n unigram_scores.append(uni_score)\n return unigram_scores\n \n def SLOR_score(self, sentence_list, lm_score, unigram_score):\n SLOR_scores = []\n for i in range(len(sentence_list)):\n SLOR_score = lm_score[i]-unigram_score[i]\n if self.log:\n SLOR_score = math.exp(lm_score[i]-unigram_score[i])\n SLOR_scores.append(SLOR_score)\n return SLOR_scores\n \n def score_batched(self, generated_texts, source_texts=None, printing=False, **kwargs):\n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_texts:\n sources_lm_prob_scores = self.scorer.sentence_score(source_texts, reduce=self.reduce, log=self.log)\n sources_unigram_scores = self.unigram_score(source_texts)\n sources_SLOR_score = self.SLOR_score(source_texts, sources_lm_prob_scores, sources_unigram_scores)\n\n\n\n generateds_lm_prob_scores = self.scorer.sentence_score(generated_texts, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generated_texts)\n generateds_SLOR_score = self.SLOR_score(generated_texts, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_texts)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_texts)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}\n\n def score(self, generated_text, source_text=None, printing=False, **kwargs):\n # sources_lm_prob_score = scorer.sentence_score(source_list, reduce=\"mean\")\n \n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_text:\n source_list = [source_text]\n sources_lm_prob_scores = self.scorer.sentence_score(source_list, reduce=self.reduce, log=self.log)\n sources_unigram_scores = 
self.unigram_score(source_list)\n sources_SLOR_score = self.SLOR_score(source_list, sources_lm_prob_scores, sources_unigram_scores)\n \n \n \n generateds_list = [generated_text]\n generateds_lm_prob_scores = self.scorer.sentence_score(generateds_list, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generateds_list)\n generateds_SLOR_score = self.SLOR_score(generateds_list, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_text)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_text)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}" }, { "identifier": "SaliencyBERTScore", "path": "scoring/saliency_scorer.py", "snippet": "class SaliencyBERTScore:\n def __init__(self, lmscorer = \"bertscore\", lang=\"en\"):\n self.bertscore = evaluate.load(lmscorer)\n self.lang = lang\n\n\n def calc_BERT_score(self, predictions, references, sigmoid):\n results = self.bertscore.compute(predictions=predictions, references=references, lang=self.lang)\n if sigmoid:\n # apply the sigmoid to the F1 scores only; expit cannot be applied to the results dict itself\n results['f1'] = expit(results['f1'])\n return results\n\n def score_batched(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score(generated_text, source_text, sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}\n\n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score([generated_text], [source_text], sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}" }, { "identifier": "SimplicityTextScore", "path": "scoring/simplicity_scorer.py", "snippet": "class SimplicityTextScore:\n def __init__(self):\n pass\n\n def calc_FRE(self, text, sigmoid):\n min_val = -30\n score = textstat.flesch_reading_ease(text)\n scaled_score = (score - min_val) / (121.22 - min_val)\n # Clamp scaled_score to the range [0, 1]\n scaled_score = max(0, min(scaled_score, 1))\n \n if sigmoid:\n scaled_score = expit(scaled_score)\n \n return scaled_score\n \n \n \n def calc_FKGL(self, text, sigmoid):\n score = max(0,textstat.flesch_kincaid_grade(text))\n if sigmoid:\n score = expit(score)\n return score\n\n def score_batched(self, generated_texts, source_texts=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = [],[]\n \n for text in generated_texts:\n gen_score.append(self.calc_FRE(text, sigmoid))\n \n \n if source_texts:\n for text in source_texts:\n source_score.append(self.calc_FRE(text, sigmoid))\n \n if printing:\n print(\"score: \", gen_score)\n print(\"source_score: \", source_score)\n return {\"scores\": gen_score, \"source_scores\": source_score}\n \n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n \n gen_score = self.calc_FRE(generated_text, sigmoid)\n \n if source_text:\n source_score = self.calc_FRE(source_text, sigmoid)\n \n if printing:\n print(\"score: \", gen_score)\n print(\"source_score: \", source_score)\n return 
{\"scores\": gen_score, \"source_scores\": source_score}" }, { "identifier": "ScorerWrapper", "path": "scoring/aggregate_scorer.py", "snippet": "class ScorerWrapper:\n def __init__(self, scorers, scoring_method=\"logsum\", batch_size=1):\n assert scoring_method in [\"product\", \"logsum\"], \"Unrecognized `scoring_method`\"\n \n self.scorers = scorers\n self.scoring_method = scoring_method\n\n # if self.scoring_method == \"logsum\":\n # self.score_func = logsum_score\n # elif self.scoring_method == \"product\":\n # self.score_func = product_score\n \n if batch_size > 1:\n exec(\"self.score_func = {}\".format(self.scoring_method+\"_\"+\"score_batched\"))\n else:\n exec(\"self.score_func = {}\").format(self.scoring_method+\"_\"+\"score\")\n self.batch_size = batch_size\n def get_score_names(self):\n return [s[\"name\"] for s in self.scorers]\n \n def score_batched(self, input_texts=None, generated_texts=None, old_kgs=None, new_kgs=None, dels_ents=None, partial=False, printing=False, timings=False, extras={}, progress=False):\n assert len(input_texts) == len(generated_texts) == len(old_kgs) == len(new_kgs) == len(dels_ents), \"Data lengths don't match\"\n \n data_list = []\n for inp, gen, old_kg, new_kg, del_ents in zip(input_texts, generated_texts, old_kgs, new_kgs, dels_ents):\n data_list.append({\"inp\": inp, \"gen\": gen, \"old_kg\": old_kg, \"new_kg\": new_kg, \"del_ents\": del_ents})\n\n if len(data_list) == 0:\n progress = False\n \n for batch in batcher(data_list, batch_size=self.batch_size, progress=progress):\n batch_inputs = [instance_dict[\"inp\"] for instance_dict in batch]\n batch_gens = [instance_dict[\"gen\"] for instance_dict in batch]\n batch_old_kgs = [instance_dict[\"old_kg\"] for instance_dict in batch]\n batch_new_kgs = [instance_dict[\"new_kg\"] for instance_dict in batch]\n batch_dels_ents = [instance_dict[\"del_ents\"] for instance_dict in batch]\n batch_scores = self.score_func(self.scorers, batch_inputs, batch_gens, batch_old_kgs, batch_new_kgs, batch_dels_ents)\n for score_type, scores in batch_scores.items():\n if type(scores) in [torch.Tensor, np.array, np.ndarray]:\n batch_scores[score_type] = scores.tolist()\n\n if printing:\n print(\"[total]\", all_outputs[\"total_scores\"])\n return batch_scores\n \n def score(self, input_text=None, generated_text=None, old_kg=None, new_kg=None, del_ents=None):\n aggregate_score = self.score_func(self.scorers, input_text, generated_text, old_kg, new_kg, del_ents)\n return aggregate_score\n \n\n def __call__(self, graphs, input_text, generated_text, **kwargs):\n return self.score(graphs, input_text, generated_text, **kwargs)" }, { "identifier": "GAPDataloader", "path": "GAP/data_relations_as_nodes.py", "snippet": "class GAPDataloader(DataLoader):\n\n def __init__(self, args, dataset, mode):\n if mode == \"train\":\n sampler = RandomSampler(dataset)\n batch_size = args.train_batch_size\n else:\n sampler = SequentialSampler(dataset)\n batch_size = args.predict_batch_size\n super(GAPDataloader, self).__init__(dataset, sampler=sampler, batch_size=batch_size,\n num_workers=args.num_workers)" }, { "identifier": "EventDataset", "path": "GAP/data_relations_as_nodes.py", "snippet": "class EventDataset(Dataset):\n def __init__(self, logger, args, data, tokenizer, mode):\n self.data = data\n self.tokenizer = tokenizer\n self.topology = {\"entity-entity\": args.entity_entity, \n \"entity-relation\": args.entity_relation,\n \"relation-entity\": args.relation_entity,\n \"relation-relation\": args.relation_relation\n } \n \n \n \n 
print(\"Total samples = {}\".format(len(self.data)))\n\n \n assert type(self.data) == list\n self.args = args\n self.data_type = mode\n self.metric = \"BLEU\"\n self.head_ids, self.rel_ids, self.tail_ids = self.tokenizer.encode(' [head]', add_special_tokens=False), \\\n self.tokenizer.encode(' [relation]', add_special_tokens=False), \\\n self.tokenizer.encode(' [tail]', add_special_tokens=False)\n self.graph_ids, self.text_ids = self.tokenizer.encode(' [graph]', add_special_tokens=False), \\\n self.tokenizer.encode(' [text]', add_special_tokens=False)\n\n if self.args.model_name == \"bart\":\n self.mask_token = self.tokenizer.mask_token\n self.mask_token_id = self.tokenizer.mask_token_id\n else:\n self.mask_token = self.tokenizer.additional_special_tokens[0]\n self.mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.additional_special_tokens[0])\n\n if self.args.model_name == \"bart\":\n if self.args.append_another_bos:\n self.add_bos_id = [self.tokenizer.bos_token_id] * 2\n else:\n self.add_bos_id = [self.tokenizer.bos_token_id]\n else:\n self.add_bos_id = []\n\n def __len__(self):\n return len(self.data)\n \n def graph_size(self,idx):\n entry = self.data[idx]\n kg = entry[0]\n \n kg_list = []\n triple_list = kg.split('<S>')\n triple_list = [triple_list[0]] + ['<S>'+triple for triple in triple_list[1:]]\n triple_list = list(filter(None,triple_list))\n for triple in triple_list:\n head = re.search('<S>(.*)<P>', triple).group(1).strip()\n rel = re.search('<P>(.*)<O>', triple).group(1).strip()\n tail = re.search('<O>(.*)', triple).group(1).strip()\n kg_list.append([head,rel,tail])\n \n \n\n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n \n text_entity, text_relation = self.get_all_entities_per_sample(kg_list)\n entity_change, relation_change = self.get_change_per_sample(text_entity, text_relation)\n return len(entity_change)\n\n def graph_linearize(self, triple, entity_change, head_ids, rel_ids, tail_ids,\n relation_change, cnt_edge, adj_matrix):\n # string_label: encoder ids\n # string_label_tokens: encoder tokens\n if len(triple[0]) == 0:\n return [], '', [], [], cnt_edge, adj_matrix\n nodes, edges = [], []\n string_label = copy.deepcopy(head_ids)\n string_label_tokens = ' <S>'\n nodes.extend([-1] * len(string_label))\n edges.extend([-1] * len(string_label))\n\n\n string_label += entity_change[triple[0]][0]\n string_label_tokens += ' {}'.format(triple[0])\n nodes.extend([entity_change[triple[0]][1]] * len(entity_change[triple[0]][0]))\n edges.extend([-1] * len(entity_change[triple[0]][0]))\n\n\n if len(triple[1]) != 0 and len(triple[2]) != 0:\n rel_label = relation_change[triple[1]]\n rel_ent_label = entity_change[triple[1]][1]\n rel_label_token = copy.deepcopy(triple[1])\n words_label = rel_ids + rel_label + tail_ids + entity_change[triple[2]][0]\n words_label_tokens = ' <P> {} <O> {}'.format(rel_label_token, triple[2])\n nodes.extend(\n ([-1] * len(rel_ids)) + ([entity_change[triple[1]][1]] * len(rel_label)) + ([-1] * len(tail_ids)) + ([entity_change[triple[2]][1]] * len(\n entity_change[triple[2]][0])))\n edges.extend([-1] * len(rel_ids) + [cnt_edge] * len(rel_label) + [-1] * (\n len(tail_ids) + len(entity_change[triple[2]][0])))\n if entity_change[triple[0]][1] < len(adj_matrix) and entity_change[triple[2]][1] < len(adj_matrix):\n\n\n if self.topology['entity-entity']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[2]][1]] = 1\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[0]][1]] = 1\n\n if 
self.topology['entity-relation']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[1]][1]] = 2\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[1]][1]] = 2\n\n if self.topology['relation-entity']:\n adj_matrix[entity_change[triple[1]][1]][entity_change[triple[0]][1]] = 3\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[1]][1]] = 3\n \n if not self.topology['relation-entity'] and not self.topology['relation-relation']:\n adj_matrix[entity_change[triple[1]][1]][entity_change[triple[1]][1]] = 10\n\n if not self.topology['entity-relation'] and not self.topology['entity-entity']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[0]][1]] = 10\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[2]][1]] = 10\n\n cnt_edge += 1\n string_label += words_label\n string_label_tokens += words_label_tokens\n\n assert len(string_label) == len(nodes) == len(edges)\n\n return string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix\n\n def relation_to_relation_fill(self, node_dict, rel_dict, adj_matrix):\n adj_matrix_temp = np.array(adj_matrix)\n rel_idx_list = []\n for rel in rel_dict.keys():\n rel_idx = node_dict[rel][1]\n rel_idx_list.append(rel_idx)\n adj_matrix_np = np.array(adj_matrix)\n adj_matrix_np_bool = (adj_matrix_np==-1)\n #reassign -1s to 0s\n adj_matrix_np[adj_matrix_np_bool] = 0\n #get squared matrix for r-r\n adj_matrix_sq = adj_matrix_np@adj_matrix_np\n \n #old adj_matrix + squared matrix only r-r\n rel_idx_list = np.array(rel_idx_list, dtype=np.intp)\n adj_matrix_temp[rel_idx_list[:,np.newaxis], rel_idx_list] = (adj_matrix_sq[rel_idx_list][:,rel_idx_list] > 0)*4\n adj_matrix_new = adj_matrix_temp.tolist()\n \n return adj_matrix_new\n \n def get_all_entities_per_sample(self, triple_list):\n text_entity = set()\n text_relation = set()\n for triple in triple_list:\n if len(triple[0]) == 0:\n continue\n if len(triple[1]) != 0 and len(triple[2]) != 0:\n text_relation.add(triple[1])\n text_entity.add(triple[0])\n text_entity.add(triple[2])\n \n text_entity_list = list(text_entity)+list(text_relation)\n text_relation_list = list(text_relation)\n \n return text_entity_list, text_relation_list\n\n def get_change_per_sample(self, text_entity, text_relation):\n # during fine-tuning, we don't mask entities or relations\n ent_change = {}\n total_entity = text_entity\n\n for ent_id in range(len(total_entity)):\n entity_toks = self.tokenizer.encode(\" {}\".format(total_entity[ent_id]), add_special_tokens=False)\n ent_change[total_entity[ent_id]] = [entity_toks, ent_id]\n \n # relation change only includes the relation tokens and ids\n rel_change = {}\n for rel_id in range(len(text_relation)):\n rel_change[text_relation[rel_id]] = self.tokenizer.encode(' {}'.format(text_relation[rel_id]),\n add_special_tokens=False)\n\n return ent_change, rel_change\n\n def truncate_pair_ar(self, a, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n # add_bos_id + graph_ids + a + text_ids + b + eos_token_id\n length_a_b = self.args.max_input_length - len(add_bos_id) - len(graph_ids) - len(text_ids) - 1\n if len(a) > length_a_b:\n a = a[:length_a_b]\n node_ids = node_ids[:length_a_b]\n edge_ids = edge_ids[:length_a_b]\n input_ids = add_bos_id + graph_ids + a + text_ids + [self.tokenizer.eos_token_id]\n input_node_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + node_ids + [-1] * (len(text_ids) + 1)\n input_edge_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + edge_ids + [-1] * (len(text_ids) + 1)\n attn_mask = [1] * len(input_ids) + [0] * 
(self.args.max_input_length - len(input_ids))\n input_ids += [self.tokenizer.pad_token_id] * (self.args.max_input_length - len(input_ids))\n input_node_ids += [-1] * (self.args.max_input_length - len(input_node_ids))\n input_edge_ids += [-1] * (self.args.max_input_length - len(input_edge_ids))\n assert len(input_ids) == len(attn_mask) == self.args.max_input_length == len(input_node_ids) == len(\n input_edge_ids)\n return input_ids, attn_mask, input_node_ids, input_edge_ids\n\n \n def ar_prep_data(self, questions, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n input_ids, input_attn_mask, input_node_ids, input_edge_ids = self.truncate_pair_ar(questions, add_bos_id,\n graph_ids, text_ids,\n node_ids, edge_ids)\n\n return input_ids, input_attn_mask, input_node_ids, input_edge_ids\n\n\n\n def __getitem__(self, idx):\n kg = self.data[idx]\n # print(\"KG: \", kg)\n kg_list = []\n triple_list = kg.split('<S>')\n triple_list = [triple_list[0]] + ['<S>'+triple for triple in triple_list[1:]]\n triple_list = list(filter(None,triple_list))\n for triple in triple_list:\n head = re.search('<S>(.*)<P>', triple).group(1).strip()\n rel = re.search('<P>(.*)<O>', triple).group(1).strip()\n tail = re.search('<O>(.*)', triple).group(1).strip()\n kg_list.append([head,rel,tail])\n \n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n # print(\"kg_list: \", kg_list)\n text_entity, text_relation = self.get_all_entities_per_sample(kg_list)\n entity_change, relation_change = self.get_change_per_sample(text_entity, text_relation)\n adj_matrix = [[-1] * (self.args.max_node_length + 1) for _ in range(self.args.max_node_length + 1)]\n\n cnt_edge = 0\n\n for i, triple in enumerate(kg_list):\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.graph_linearize(\n triple,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n \n strings_label += string_label\n strings_label_tokens += string_label_tokens\n node_ids += nodes\n edge_ids += edges\n if self.topology['relation-relation']:\n adj_matrix = self.relation_to_relation_fill(entity_change, relation_change, adj_matrix)\n \n words_label_ids, words_label_tokens, words_input_ids, words_input_tokens = [], '', [], ''\n# current_text = entry[1]\n \n# for word in current_text.split():\n# word_label_ids = self.tokenizer.encode(\" {}\".format(word), add_special_tokens=False)\n# word_label_tokens = copy.deepcopy(word)\n\n# words_label_ids += word_label_ids\n# words_label_tokens += ' ' + word_label_tokens\n # print(\"strings_label: \", strings_label)\n # print(\"node_ids: \", node_ids)\n # print(\"edge_ids: \", edge_ids)\n # print(\"self.add_bos_id: \", self.add_bos_id)\n # print(\"self.graph_ids: \", self.graph_ids)\n input_ids_ar, attn_mask_ar, input_node_ids_ar, input_edge_ids_ar = \\\n self.ar_prep_data(strings_label, self.add_bos_id, self.graph_ids,\n self.text_ids, node_ids, edge_ids)\n node_length_ar = max(input_node_ids_ar) + 1\n edge_length_ar = max(input_edge_ids_ar) + 1\n \n\n def masked_fill(src, masked_value, fill_value):\n return [src[src_id] if src[src_id] != masked_value and src[src_id] < fill_value else fill_value for src_id\n in range(len(src))]\n\n input_node_ids_ar, input_edge_ids_ar = masked_fill(input_node_ids_ar, -1, self.args.max_node_length), \\\n masked_fill(input_edge_ids_ar, -1, self.args.max_edge_length)\n\n def masked_fill_matrix(adj_matrix_input, masked_value, fill_value):\n adj_matrix_tmp = copy.deepcopy(adj_matrix_input)\n for a_id 
in range(len(adj_matrix_tmp)):\n for b_id in range(len(adj_matrix_tmp)):\n if adj_matrix_tmp[a_id][b_id] == masked_value or adj_matrix_tmp[a_id][b_id] > fill_value:\n adj_matrix_tmp[a_id][b_id] = fill_value\n return adj_matrix_tmp\n\n adj_matrix_ar = masked_fill_matrix(adj_matrix, -1, self.args.max_edge_length)\n\n assert len(input_ids_ar) == len(attn_mask_ar) == self.args.max_input_length == len(input_node_ids_ar) == len(\n input_edge_ids_ar)\n\n input_ids_ar = torch.LongTensor(input_ids_ar)\n attn_mask_ar = torch.LongTensor(attn_mask_ar)\n \n input_node_ids_ar = torch.LongTensor(input_node_ids_ar)\n input_edge_ids_ar = torch.LongTensor(input_edge_ids_ar)\n node_length_ar = torch.LongTensor([node_length_ar])\n edge_length_ar = torch.LongTensor([edge_length_ar])\n adj_matrix_ar = torch.LongTensor(adj_matrix_ar)\n \n return input_ids_ar, attn_mask_ar, input_node_ids_ar, node_length_ar, adj_matrix_ar" }, { "identifier": "WebNLGDataset", "path": "GAP/data_relations_as_nodes.py", "snippet": "class WebNLGDataset(Dataset):\n def __init__(self, logger, args, data_path, tokenizer, mode):\n self.data_path = data_path\n self.tokenizer = tokenizer\n self.topology = {\"entity-entity\": args.entity_entity, \n \"entity-relation\": args.entity_relation,\n \"relation-entity\": args.relation_entity,\n \"relation-relation\": args.relation_relation\n } \n \n with open(self.data_path + '.json', 'r') as f:\n self.data = json.load(f)\n\n print(\"Total samples = {}\".format(len(self.data)))\n\n assert type(self.data) == list\n assert all([\"id\" in d for d in self.data]), self.data[0].keys()\n if type(self.data[0][\"id\"]) == int:\n for i in range(len(self.data)):\n self.data[i][\"id\"] = str(self.data[i][\"id\"])\n\n self.args = args\n self.data_type = mode\n self.metric = \"BLEU\"\n\n self.head_ids, self.rel_ids, self.tail_ids = self.tokenizer.encode(' [head]', add_special_tokens=False), \\\n self.tokenizer.encode(' [relation]', add_special_tokens=False), \\\n self.tokenizer.encode(' [tail]', add_special_tokens=False)\n\n self.graph_ids, self.text_ids = self.tokenizer.encode(' [graph]', add_special_tokens=False), \\\n self.tokenizer.encode(' [text]', add_special_tokens=False)\n\n if self.args.model_name == \"bart\":\n self.mask_token = self.tokenizer.mask_token\n self.mask_token_id = self.tokenizer.mask_token_id\n else:\n self.mask_token = self.tokenizer.additional_special_tokens[0]\n self.mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.additional_special_tokens[0])\n\n if self.args.model_name == \"bart\":\n if self.args.append_another_bos:\n self.add_bos_id = [self.tokenizer.bos_token_id] * 2\n else:\n self.add_bos_id = [self.tokenizer.bos_token_id]\n else:\n self.add_bos_id = []\n\n def __len__(self):\n return len(self.data)\n\n def linearize_v2(self, entity, entity_change, head_ids, rel_ids, tail_ids,\n relation_change, cnt_edge, adj_matrix):\n # string_label: encoder ids\n # string_label_tokens: encoder tokens\n\n if len(entity[0]) == 0:\n return [], '', [], [], cnt_edge, adj_matrix\n nodes, edges = [], []\n string_label = copy.deepcopy(head_ids)\n string_label_tokens = ' [head]'\n nodes.extend([-1] * len(string_label))\n edges.extend([-1] * len(string_label))\n\n\n string_label += entity_change[entity[0]][0]\n string_label_tokens += ' {}'.format(entity[0])\n nodes.extend([entity_change[entity[0]][1]] * len(entity_change[entity[0]][0]))\n edges.extend([-1] * len(entity_change[entity[0]][0]))\n\n\n for rel in entity[2]:\n if len(rel[0]) != 0 and len(rel[1]) != 0:\n rel_label = 
relation_change[rel[0]]\n rel_ent_label = entity_change[rel[0]][1]\n rel_label_token = copy.deepcopy(rel[0])\n words_label = rel_ids + rel_label + tail_ids + entity_change[rel[1]][0]\n words_label_tokens = ' [relation] {} [tail] {}'.format(rel_label_token, rel[1])\n nodes.extend(\n ([-1] * len(rel_ids)) + ([entity_change[rel[0]][1]] * len(rel_label)) + ([-1] * len(tail_ids)) + ([entity_change[rel[1]][1]] * len(\n entity_change[rel[1]][0])))\n\n \n edges.extend([-1] * len(rel_ids) + [cnt_edge] * len(rel_label) + [-1] * (\n len(tail_ids) + len(entity_change[rel[1]][0])))\n if entity_change[entity[0]][1] < len(adj_matrix) and entity_change[rel[1]][1] < len(adj_matrix):\n if self.topology['entity-entity']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[rel[1]][1]] = 1\n adj_matrix[entity_change[rel[1]][1]][entity_change[entity[0]][1]] = 1\n\n if self.topology['entity-relation']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[rel[0]][1]] = 2\n adj_matrix[entity_change[rel[1]][1]][entity_change[rel[0]][1]] = 2\n \n if self.topology['relation-entity']:\n adj_matrix[entity_change[rel[0]][1]][entity_change[entity[0]][1]] = 3\n adj_matrix[entity_change[rel[0]][1]][entity_change[rel[1]][1]] = 3\n \n if not self.topology['relation-entity'] and not self.topology['relation-relation']:\n adj_matrix[entity_change[rel[0]][1]][entity_change[rel[0]][1]] = 10\n \n if not self.topology['entity-relation'] and not self.topology['entity-entity']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[entity[0]][1]] = 10\n adj_matrix[entity_change[rel[1]][1]][entity_change[rel[1]][1]] = 10\n\n cnt_edge += 1\n string_label += words_label\n string_label_tokens += words_label_tokens\n\n assert len(string_label) == len(nodes) == len(edges)\n\n return string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix\n\n \n def relation_to_relation_fill(self, node_dict, rel_dict, adj_matrix):\n adj_matrix_temp = np.array(adj_matrix)\n rel_idx_list = []\n for rel in rel_dict.keys():\n rel_idx = node_dict[rel][1]\n rel_idx_list.append(rel_idx)\n adj_matrix_np = np.array(adj_matrix)\n adj_matrix_np_bool = (adj_matrix_np==-1)\n #reassign -1s to 0s\n adj_matrix_np[adj_matrix_np_bool] = 0\n #get squared matrix for r-r\n adj_matrix_sq = adj_matrix_np@adj_matrix_np\n \n #old adj_matrix + squared matrix only r-r\n rel_idx_list = np.array(rel_idx_list, dtype=np.intp)\n adj_matrix_temp[rel_idx_list[:,np.newaxis], rel_idx_list] = (adj_matrix_sq[rel_idx_list][:,rel_idx_list] > 0)*4\n adj_matrix_new = adj_matrix_temp.tolist()\n \n return adj_matrix_new\n \n \n def get_all_entities_per_sample(self, mark_entity_number, mark_entity, entry):\n text_entity = set()\n text_relation = set()\n for entity_id in mark_entity_number:\n entity = entry['kbs'][entity_id]\n if len(entity[0]) == 0:\n continue\n for rel in entity[2]:\n if len(rel[0]) != 0 and len(rel[1]) != 0:\n text_relation.add(rel[0])\n text_entity.add(rel[1])\n\n text_entity_list = list(text_entity)+list(text_relation)\n text_relation_list = list(text_relation)\n for entity_ele in mark_entity:\n if entity_ele in text_entity_list:\n text_entity_list.remove(entity_ele)\n \n return text_entity_list, text_relation_list\n\n def get_change_per_sample(self, mark_entity, text_entity, text_relation):\n # during fine-tuning, we don't mask entities or relations\n ent_change = {}\n total_entity = mark_entity + text_entity\n\n for ent_id in range(len(total_entity)):\n entity_toks = self.tokenizer.encode(\" {}\".format(total_entity[ent_id]), add_special_tokens=False)\n 
ent_change[total_entity[ent_id]] = [entity_toks, ent_id]\n # relation change only includes the relation tokens and ids\n rel_change = {}\n for rel_id in range(len(text_relation)):\n rel_change[text_relation[rel_id]] = self.tokenizer.encode(' {}'.format(text_relation[rel_id]),\n add_special_tokens=False)\n return ent_change, rel_change\n\n def truncate_pair_ar(self, a, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n # add_bos_id + graph_ids + a + text_ids + b + eos_token_id\n length_a_b = self.args.max_input_length - len(add_bos_id) - len(graph_ids) - len(text_ids) - 1\n if len(a) > length_a_b:\n a = a[:length_a_b]\n node_ids = node_ids[:length_a_b]\n edge_ids = edge_ids[:length_a_b]\n input_ids = add_bos_id + graph_ids + a + text_ids + [self.tokenizer.eos_token_id]\n input_node_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + node_ids + [-1] * (len(text_ids) + 1)\n input_edge_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + edge_ids + [-1] * (len(text_ids) + 1)\n attn_mask = [1] * len(input_ids) + [0] * (self.args.max_input_length - len(input_ids))\n input_ids += [self.tokenizer.pad_token_id] * (self.args.max_input_length - len(input_ids))\n input_node_ids += [-1] * (self.args.max_input_length - len(input_node_ids))\n input_edge_ids += [-1] * (self.args.max_input_length - len(input_edge_ids))\n assert len(input_ids) == len(attn_mask) == self.args.max_input_length == len(input_node_ids) == len(\n input_edge_ids)\n return input_ids, attn_mask, input_node_ids, input_edge_ids\n\n def ar_prep_data(self, questions, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n input_ids, input_attn_mask, input_node_ids, input_edge_ids = self.truncate_pair_ar(questions, add_bos_id,\n graph_ids, text_ids,\n node_ids, edge_ids)\n\n return input_ids, input_attn_mask, input_node_ids, input_edge_ids\n \n\n\n def __getitem__(self, idx):\n\n entry = self.data[idx]\n\n entities = []\n for _ in entry['kbs']:\n entities.append(_)\n\n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n # mark_entity: entities with KB numbers which are important for this task\n # text_entity: entities without KB numbers but only with text, which are less important\n mark_entity = [entry['kbs'][ele_entity][0] for ele_entity in entities]\n mark_entity_number = entities\n text_entity, text_relation = self.get_all_entities_per_sample(mark_entity_number, mark_entity, entry)\n entity_change, relation_change = self.get_change_per_sample(mark_entity, text_entity, text_relation)\n total_entity = mark_entity + text_entity\n adj_matrix = [[-1] * (self.args.max_node_length + 1) for _ in range(self.args.max_node_length + 1)]\n\n cnt_edge = 0\n\n if 'title' in entry:\n entity = self.knowledge[entry['title_kb_id']]\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.linearize_v2(\n entity,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n\n strings_label += string_label\n strings_label_tokens += string_label_tokens\n\n for i, entity_id in enumerate(entities):\n entity = entry['kbs'][entity_id]\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.linearize_v2(\n entity,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n \n strings_label += string_label\n strings_label_tokens += string_label_tokens\n node_ids += nodes\n edge_ids += edges\n \n if self.topology['relation-relation']:\n adj_matrix = self.relation_to_relation_fill(entity_change, 
relation_change, adj_matrix)\n \n\n words_label_ids, words_label_tokens, words_input_ids, words_input_tokens = [], '', [], ''\n\n\n input_ids_ar, attn_mask_ar, input_node_ids_ar, input_edge_ids_ar = \\\n self.ar_prep_data(strings_label, self.add_bos_id, self.graph_ids,\n self.text_ids, node_ids, edge_ids)\n\n node_length_ar = max(input_node_ids_ar) + 1\n edge_length_ar = max(input_edge_ids_ar) + 1\n \n\n def masked_fill(src, masked_value, fill_value):\n return [src[src_id] if src[src_id] != masked_value and src[src_id] < fill_value else fill_value for src_id\n in range(len(src))]\n\n input_node_ids_ar, input_edge_ids_ar = masked_fill(input_node_ids_ar, -1, self.args.max_node_length), \\\n masked_fill(input_edge_ids_ar, -1, self.args.max_edge_length)\n\n def masked_fill_matrix(adj_matrix_input, masked_value, fill_value):\n adj_matrix_tmp = copy.deepcopy(adj_matrix_input)\n for a_id in range(len(adj_matrix_tmp)):\n for b_id in range(len(adj_matrix_tmp)):\n if adj_matrix_tmp[a_id][b_id] == masked_value or adj_matrix_tmp[a_id][b_id] > fill_value:\n adj_matrix_tmp[a_id][b_id] = fill_value\n return adj_matrix_tmp\n\n adj_matrix_ar = masked_fill_matrix(adj_matrix, -1, self.args.max_edge_length)\n\n assert len(input_ids_ar) == len(attn_mask_ar) == self.args.max_input_length == len(input_node_ids_ar) == len(\n input_edge_ids_ar)\n\n input_ids_ar = torch.LongTensor(input_ids_ar)\n attn_mask_ar = torch.LongTensor(attn_mask_ar)\n \n input_node_ids_ar = torch.LongTensor(input_node_ids_ar)\n input_edge_ids_ar = torch.LongTensor(input_edge_ids_ar)\n node_length_ar = torch.LongTensor([node_length_ar])\n edge_length_ar = torch.LongTensor([edge_length_ar])\n adj_matrix_ar = torch.LongTensor(adj_matrix_ar)\n \n return input_ids_ar, attn_mask_ar, input_node_ids_ar, node_length_ar, adj_matrix_ar" }, { "identifier": "evaluate_bleu", "path": "GAP/data_relations_as_nodes.py", "snippet": "def evaluate_bleu(data_ref, data_sys):\n coco_eval = run_coco_eval(data_ref, data_sys)\n scores = {metric: score for metric, score in list(coco_eval.eval.items())}\n return scores[\"Bleu_4\"]" }, { "identifier": "get_t_emb_dim", "path": "GAP/data_relations_as_nodes.py", "snippet": "def get_t_emb_dim(args):\n t_emb_dim = int(args.entity_entity)+int(args.entity_relation)\\\n +int(args.relation_entity)+int(args.relation_relation)+1\n return t_emb_dim" }, { "identifier": "GAPBartForConditionalGeneration", "path": "GAP/modeling_gap_type.py", "snippet": "class GAPBartForConditionalGeneration(BartForConditionalGeneration):\n def __init__(self, config, **kwargs):\n super().__init__(config)\n base_model = GAPBartModel(config,**kwargs)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n \n def forward(self, input_ids, attention_mask=None, encoder_outputs=None,\n decoder_input_ids=None, decoder_attention_mask=None, input_node_ids=None,\n node_length=None, adj_matrix=None, decoder_whole_ids=None, decoder_cached_states=None,\n use_cache=False, is_training=False):\n\n if is_training:\n _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)\n else:\n _decoder_input_ids = decoder_input_ids\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=_decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n input_node_ids=input_node_ids,\n node_length=node_length,\n adj_matrix=adj_matrix,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n )\n 
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n if is_training:\n loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)\n loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),\n decoder_input_ids.view(-1))\n return loss\n return (lm_logits, ) + outputs[1:]\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n max_length: Optional[int] = None,\n min_length: Optional[int] = None,\n do_sample: Optional[bool] = None,\n early_stopping: Optional[bool] = None,\n num_beams: Optional[int] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n repetition_penalty: Optional[float] = None,\n bad_words_ids: Optional[Iterable[int]] = None,\n bos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[int] = None,\n length_penalty: Optional[float] = None,\n no_repeat_ngram_size: Optional[int] = None,\n num_return_sequences: Optional[int] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n input_node_ids=None,\n node_length=None,\n adj_matrix=None,\n decoder_start_token_id: Optional[int] = None,\n use_cache: Optional[bool] = None,\n **model_specific_kwargs\n ) -> torch.LongTensor:\n r\"\"\" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.\n\n min_length: (`optional`) int\n The min length of the sequence to be generated. Between 0 and infinity. Default to 0.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n early_stopping: (`optional`) bool\n if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n pad_token_id: (`optional`) int\n Padding token. Default to specicic model pad_token_id or None if it does not exist.\n\n bos_token_id: (`optional`) int\n BOS token. 
Defaults to `bos_token_id` as defined in the models config.\n\n eos_token_id: (`optional`) int\n EOS token. Defaults to `eos_token_id` as defined in the models config.\n\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n no_repeat_ngram_size: (`optional`) int\n If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.\n bad_words_ids: (`optional`) list of lists of int\n `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. Default to 1.\n\n attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n Defaults to `None`.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n decoder_start_token_id=None: (`optional`) int\n Start token id for the decoder. Defaults to ``decoder_start_token_id`` as defined the model's config or to the ``bos_token_id``\n if no ``decoder_start_token_id`` is found in the config.\n This is only relevant for encoder-decoder models.\n\n use_cache: (`optional`) bool\n If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.\n\n model_specific_kwargs: (`optional`) dict\n Additional model specific kwargs will be forwarded to the `forward` function of the model.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n from transformers import AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer. 
from_pretrained
('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache.\n input_context = 'My cute dog' # \"Legal\" is one of the control codes for ctrl\n bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n min_length = min_length if min_length is not None else self.config.min_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n no_repeat_ngram_size = (\n no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size\n )\n bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n decoder_start_token_id = (\n decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(min_length, int) and min_length >= 0, \"`min_length` should be a positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(early_stopping, bool), \"`early_stopping` should be a boolean.\"\n assert isinstance(use_cache, bool), \"`use_cache` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a positive integer.\"\n assert (eos_token_id is None) or (\n isinstance(eos_token_id, int) and (eos_token_id >= 0)\n ), \"`eos_token_id` should be a positive integer.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0\n ), \"`no_repeat_ngram_size` should be a positive integer.\"\n assert (\n 
isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n assert (\n bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)\n ), \"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n # not allow to duplicate outputs when greedy decoding\n if do_sample is False:\n if num_beams == 1:\n # no_beam_search greedy generation conditions\n assert (\n num_return_sequences == 1\n ), \"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1\"\n\n else:\n # beam_search greedy generation conditions\n assert (\n num_beams >= num_return_sequences\n ), \"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences\"\n\n # create attention mask if necessary\n # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140\n if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):\n attention_mask = input_ids.ne(pad_token_id).long()\n elif attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n # set pad_token_id to eos_token_id if not set. 
Important that this is done after\n # attention_mask is created\n if pad_token_id is None and eos_token_id is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_id)\n )\n pad_token_id = eos_token_id\n\n # current position and vocab size\n if hasattr(self.config, \"vocab_size\"):\n vocab_size = self.config.vocab_size\n elif (\n self.config.is_encoder_decoder\n and hasattr(self.config, \"decoder\")\n and hasattr(self.config.decoder, \"vocab_size\")\n ):\n vocab_size = self.config.decoder.vocab_size\n\n # set effective batch size and effective batch multiplier according to do_sample\n if do_sample:\n effective_batch_size = batch_size * num_return_sequences\n effective_batch_mult = num_return_sequences\n else:\n effective_batch_size = batch_size\n effective_batch_mult = 1\n\n if self.config.is_encoder_decoder:\n if decoder_start_token_id is None:\n decoder_start_token_id = bos_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation\"\n assert hasattr(self, \"get_encoder\"), \"{} should have a 'get_encoder' function defined\".format(self)\n assert callable(self.get_encoder), \"{} should be a method\".format(self.get_encoder)\n\n # get encoder and store encoder outputs\n encoder = self.get_encoder()\n\n # add structural information when encoding\n encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask, input_node_ids=input_node_ids,\n node_length=node_length, adj_matrix=adj_matrix)\n\n # Expand input ids if num_beams > 1 or num_return_sequences > 1\n if num_return_sequences > 1 or num_beams > 1:\n input_ids_len = input_ids.shape[-1]\n input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)\n attention_mask = attention_mask.unsqueeze(1).expand(\n batch_size, effective_batch_mult * num_beams, input_ids_len\n )\n\n input_ids = input_ids.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n attention_mask = attention_mask.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n\n if self.config.is_encoder_decoder:\n # create empty decoder_input_ids\n input_ids = torch.full(\n (effective_batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n cur_len = 1\n\n assert (\n batch_size == encoder_outputs[0].shape[0]\n ), f\"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} \"\n\n # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)\n expanded_batch_idxs = (\n torch.arange(batch_size)\n .view(-1, 1)\n .repeat(1, num_beams * effective_batch_mult)\n .view(-1)\n .to(input_ids.device)\n )\n # expand encoder_outputs\n encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])\n\n else:\n encoder_outputs = None\n cur_len = input_ids.shape[-1]\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n early_stopping=early_stopping,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n 
bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n num_return_sequences=num_return_sequences,\n length_penalty=length_penalty,\n num_beams=num_beams,\n vocab_size=vocab_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n\n return output" }, { "identifier": "GAPBartForConditionalGeneration", "path": "GAP/modeling_gap.py", "snippet": "class GAPBartForConditionalGeneration(BartForConditionalGeneration):\n def __init__(self, config):\n super().__init__(config)\n base_model = GAPBartModel(config)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n\n def forward(self, input_ids, attention_mask=None, encoder_outputs=None,\n decoder_input_ids=None, decoder_attention_mask=None, input_node_ids=None, \n node_length=None, adj_matrix=None, decoder_whole_ids=None, decoder_cached_states=None,\n use_cache=False, is_training=False):\n\n if is_training:\n _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)\n else:\n _decoder_input_ids = decoder_input_ids\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=_decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n input_node_ids=input_node_ids,\n node_length=node_length,\n adj_matrix=adj_matrix,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n )\n lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n if is_training:\n loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)\n loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),\n decoder_input_ids.view(-1))\n return loss\n return (lm_logits, ) + outputs[1:]\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n max_length: Optional[int] = None,\n min_length: Optional[int] = None,\n do_sample: Optional[bool] = None,\n early_stopping: Optional[bool] = None,\n num_beams: Optional[int] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n repetition_penalty: Optional[float] = None,\n bad_words_ids: Optional[Iterable[int]] = None,\n bos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[int] = None,\n length_penalty: Optional[float] = None,\n no_repeat_ngram_size: Optional[int] = None,\n num_return_sequences: Optional[int] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n input_node_ids=None,\n node_length=None,\n adj_matrix=None,\n decoder_start_token_id: Optional[int] = None,\n use_cache: Optional[bool] = None,\n **model_specific_kwargs\n ) -> torch.LongTensor:\n r\"\"\" Generates sequences for models with a LM head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.\n\n min_length: (`optional`) int\n The min length of the sequence to be generated. Between 0 and infinity. Default to 0.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n early_stopping: (`optional`) bool\n if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n pad_token_id: (`optional`) int\n Padding token. Default to specicic model pad_token_id or None if it does not exist.\n\n bos_token_id: (`optional`) int\n BOS token. Defaults to `bos_token_id` as defined in the models config.\n\n eos_token_id: (`optional`) int\n EOS token. Defaults to `eos_token_id` as defined in the models config.\n\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n no_repeat_ngram_size: (`optional`) int\n If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.\n bad_words_ids: (`optional`) list of lists of int\n `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. Default to 1.\n\n attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n Defaults to `None`.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n decoder_start_token_id=None: (`optional`) int\n Start token id for the decoder. 
Defaults to ``decoder_start_token_id`` as defined the model's config or to the ``bos_token_id``\n if no ``decoder_start_token_id`` is found in the config.\n This is only relevant for encoder-decoder models.\n\n use_cache: (`optional`) bool\n If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.\n\n model_specific_kwargs: (`optional`) dict\n Additional model specific kwargs will be forwarded to the `forward` function of the model.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n from transformers import AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache.\n input_context = 'My cute dog' # \"Legal\" is one of the control codes for ctrl\n bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = 
model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n min_length = min_length if min_length is not None else self.config.min_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n no_repeat_ngram_size = (\n no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size\n )\n bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n decoder_start_token_id = (\n decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(min_length, int) and min_length >= 0, \"`min_length` should be a positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(early_stopping, bool), \"`early_stopping` should be a boolean.\"\n assert isinstance(use_cache, bool), \"`use_cache` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a 
positive integer.\"\n assert (eos_token_id is None) or (\n isinstance(eos_token_id, int) and (eos_token_id >= 0)\n ), \"`eos_token_id` should be a positive integer.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0\n ), \"`no_repeat_ngram_size` should be a positive integer.\"\n assert (\n isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n assert (\n bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)\n ), \"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n # not allow to duplicate outputs when greedy decoding\n if do_sample is False:\n if num_beams == 1:\n # no_beam_search greedy generation conditions\n assert (\n num_return_sequences == 1\n ), \"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1\"\n\n else:\n # beam_search greedy generation conditions\n assert (\n num_beams >= num_return_sequences\n ), \"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences\"\n\n # create attention mask if necessary\n # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140\n if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):\n attention_mask = input_ids.ne(pad_token_id).long()\n elif attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n # set pad_token_id to eos_token_id if not set. 
Important that this is done after\n # attention_mask is created\n if pad_token_id is None and eos_token_id is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_id)\n )\n pad_token_id = eos_token_id\n\n # current position and vocab size\n if hasattr(self.config, \"vocab_size\"):\n vocab_size = self.config.vocab_size\n elif (\n self.config.is_encoder_decoder\n and hasattr(self.config, \"decoder\")\n and hasattr(self.config.decoder, \"vocab_size\")\n ):\n vocab_size = self.config.decoder.vocab_size\n\n # set effective batch size and effective batch multiplier according to do_sample\n if do_sample:\n effective_batch_size = batch_size * num_return_sequences\n effective_batch_mult = num_return_sequences\n else:\n effective_batch_size = batch_size\n effective_batch_mult = 1\n\n if self.config.is_encoder_decoder:\n if decoder_start_token_id is None:\n decoder_start_token_id = bos_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation\"\n assert hasattr(self, \"get_encoder\"), \"{} should have a 'get_encoder' function defined\".format(self)\n assert callable(self.get_encoder), \"{} should be a method\".format(self.get_encoder)\n\n # get encoder and store encoder outputs\n encoder = self.get_encoder()\n\n # add structural information when encoding\n encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask, input_node_ids=input_node_ids,\n node_length=node_length, adj_matrix=adj_matrix)\n\n # Expand input ids if num_beams > 1 or num_return_sequences > 1\n if num_return_sequences > 1 or num_beams > 1:\n input_ids_len = input_ids.shape[-1]\n input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)\n attention_mask = attention_mask.unsqueeze(1).expand(\n batch_size, effective_batch_mult * num_beams, input_ids_len\n )\n\n input_ids = input_ids.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n attention_mask = attention_mask.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n\n if self.config.is_encoder_decoder:\n # create empty decoder_input_ids\n input_ids = torch.full(\n (effective_batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n cur_len = 1\n\n assert (\n batch_size == encoder_outputs[0].shape[0]\n ), f\"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} \"\n\n # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)\n expanded_batch_idxs = (\n torch.arange(batch_size)\n .view(-1, 1)\n .repeat(1, num_beams * effective_batch_mult)\n .view(-1)\n .to(input_ids.device)\n )\n # expand encoder_outputs\n encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])\n\n else:\n encoder_outputs = None\n cur_len = input_ids.shape[-1]\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n early_stopping=early_stopping,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n 
bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n num_return_sequences=num_return_sequences,\n length_penalty=length_penalty,\n num_beams=num_beams,\n vocab_size=vocab_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n\n return output" } ]
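The generation snippet quoted in this record's context repeats each source row once per beam and per returned sequence by chaining `unsqueeze`, `expand`, and a contiguous `view`. A minimal, self-contained sketch of that expansion pattern (the sizes below are invented purely for illustration):

```python
import torch

# Hypothetical sizes, chosen only to make the shapes visible.
batch_size, num_beams, effective_batch_mult, seq_len = 2, 3, 1, 5
input_ids = torch.arange(batch_size * seq_len).view(batch_size, seq_len)

# (batch, seq) -> (batch, mult * beams, seq) -> (batch * mult * beams, seq)
expanded = (
    input_ids.unsqueeze(1)
    .expand(batch_size, effective_batch_mult * num_beams, seq_len)
    .contiguous()
    .view(batch_size * effective_batch_mult * num_beams, seq_len)
)

assert expanded.shape == (batch_size * num_beams, seq_len)
# Each original row now appears num_beams consecutive times, so every beam
# hypothesis indexes its own copy of the source sequence.
```

The snippet's `expanded_batch_idxs` applies the same repetition to `encoder_outputs` via `index_select`, keeping the encoder states aligned with the expanded decoder rows.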
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
21,336
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args)
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args)
model = GAP_Type_model.from_pretrained(checkpoint, t_emb_dim=t_emb_dim)
8
2023-10-24 13:24:23+00:00
24k
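Reading this record end to end: `cropped_code` stops right after `t_emb_dim = get_t_emb_dim(args)`, and the gold `next_line` loads the type-encoded GAP model. A plausible reconstruction of the surrounding branch is sketched below; the `else` branch and the device handling are assumptions inferred from the record's imports (`GAP_model` alongside `GAP_Type_model`), not text taken from the record:

```python
# Sketch only: `args`, `checkpoint`, and the GAP classes are the names used
# in the record's code; the else-branch and device move are assumptions.
if args.type_encoding:
    t_emb_dim = get_t_emb_dim(args)
    model = GAP_Type_model.from_pretrained(checkpoint, t_emb_dim=t_emb_dim)
else:
    model = GAP_model.from_pretrained(checkpoint)

if torch.cuda.is_available():
    model = model.to("cuda")
model.eval()
```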
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=None,\n types=None,\n post=False,\n loop=None,\n ):\n Judge.clear()\n self._judges = get_judges(judges, timeout, verify_ssl)\n self._method = 'POST' if post else 'GET'\n self._max_tries = max_tries\n self._real_ext_ip = real_ext_ip\n self._strict = strict\n self._dnsbl = dnsbl or []\n self._types = types or {}\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = Resolver(loop=self._loop)\n\n self._req_http_proto = not types or bool(\n ('HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5') & types.keys()\n )\n self._req_https_proto = not types or bool(('HTTPS',) & types.keys())\n self._req_smtp_proto = not types or bool(('CONNECT:25',) & types.keys()) # noqa\n\n self._ngtrs = {proto for proto in types or NGTRS}\n\n async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. Runtime: %.4f;' % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n\n def _types_passed(self, proxy):\n if not self._types:\n return True\n for proto, lvl in proxy.types.copy().items():\n req_levels = self._types.get(proto)\n if not req_levels or (lvl in req_levels):\n if not self._strict:\n return True\n else:\n if self._strict:\n del proxy.types[proto]\n if self._strict and proxy.types:\n return True\n proxy.log('Protocol or the level of anonymity differs from the requested')\n return False\n\n async def _in_DNSBL(self, host):\n _host = '.'.join(reversed(host.split('.'))) # reverse address\n tasks = []\n for domain in self._dnsbl:\n query = '.'.join([_host, domain])\n tasks.append(self._resolver.resolve(query, logging=False))\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n if any([r for r in responses if not isinstance(r, ResolveError)]):\n return True\n return False\n\n async def check(self, proxy):\n if self._dnsbl:\n if await self._in_DNSBL(proxy.host):\n proxy.log('Found in DNSBL')\n return False\n\n if self._req_http_proto:\n await Judge.ev['HTTP'].wait()\n if self._req_https_proto:\n await 
Judge.ev['HTTPS'].wait()\n if self._req_smtp_proto:\n await Judge.ev['SMTP'].wait()\n\n if proxy.expected_types:\n ngtrs = proxy.expected_types & self._ngtrs\n else:\n ngtrs = self._ngtrs\n\n results = []\n for proto in ngtrs:\n if proto == 'CONNECT:25':\n result = await self._check_conn_25(proxy, proto)\n else:\n result = await self._check(proxy, proto)\n results.append(result)\n\n proxy.is_working = True if any(results) else False\n\n if proxy.is_working and self._types_passed(proxy):\n return True\n return False\n\n async def _check_conn_25(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n proxy.types[proxy.ngtr.name] = None\n result = True\n break\n finally:\n proxy.close()\n return result\n\n async def _check(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n headers, content, rv = await _send_test_request(\n self._method, proxy, judge\n )\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n content = _decompress_content(headers, content)\n result = _check_test_response(proxy, headers, content, rv)\n if result:\n if proxy.ngtr.check_anon_lvl:\n lvl = _get_anonymity_lvl(\n self._real_ext_ip, proxy, judge, content\n )\n else:\n lvl = None\n proxy.types[proxy.ngtr.name] = lvl\n break\n finally:\n proxy.close()\n return result" }, { "identifier": "ResolveError", "path": "proxyhub/errors.py", "snippet": "class ResolveError(Exception):\n pass" }, { "identifier": "PROVIDERS", "path": "proxyhub/providers.py", "snippet": "PROVIDERS = [\n Provider(\n url='http://www.proxylists.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 49\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks4',\n proto=('SOCKS4'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks5',\n proto=('SOCKS5'),\n ), # added by ZerGo0\n Provider(\n url='http://ipaddress.com/proxy-list/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 53\n Provider(\n url='https://www.sslproxies.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 100\n Provider(\n url='https://freshfreeproxylist.wordpress.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 50\n Provider(\n url='http://proxytime.ru/http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 1400\n Provider(\n url='https://free-proxy-list.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 300\n Provider(\n url='https://us-proxy.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://fineproxy.org/eng/fresh-proxies/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 
'CONNECT:25'),\n ), # 5500\n Provider(url='https://socks-proxy.net/', proto=('SOCKS4', 'SOCKS5')), # 80\n Provider(\n url='http://www.httptunnel.ge/ProxyListForFree.aspx',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://cn-proxy.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 70\n Provider(\n url='https://hugeproxies.com/home/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 800\n Provider(\n url='http://proxy.rufey.ru/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 153\n Provider(\n url='https://geekelectronics.org/my-servisy/proxy',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 400\n Provider(\n url='http://pubproxy.com/api/proxy?limit=20&format=txt',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 20\n Proxy_list_org(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 140\n Xseo_in(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 240\n Spys_ru(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 660\n Proxylistplus_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 450\n Proxylist_me(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 2872\n Foxtools_ru(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'), max_conn=1\n ), # noqa; 500\n Gatherproxy_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 3212\n Nntime_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 1050\n Blogspot_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 24800\n Gatherproxy_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 30\n Blogspot_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1486\n Tools_rosinstrument_com(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')\n ), # noqa; 4000\n Tools_rosinstrument_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1800\n My_proxy_com(max_conn=2), # noqa; 1000\n Checkerproxy_net(), # noqa; 60000\n Aliveproxy_com(), # noqa; 210\n Freeproxylists_com(), # noqa; 1338\n Webanetlabs_net(), # noqa; 5000\n Maxiproxies_com(), # noqa; 430\n Proxylist_download(), # noqa; 35590\n # # Bad...\n # http://www.proxylist.ro/\n # Provider(url='http://proxydb.net/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS',\n # 'CONNECT:25', 'SOCKS4', 'SOCKS5')),\n # Provider(url='http://www.cybersyndrome.net/pla6.html',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 1100\n # Provider(url='https://www.ip-adress.com/proxy-list',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 57\n # Provider(url='https://www.marcosbl.com/lab/proxies/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 89\n # Provider(url='http://go4free.xyz/Free-Proxy/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 196\n # Provider(url='http://blackstarsecurity.com/proxy-list.txt'), # 7014\n # Provider(url='http://www.get-proxy.net/proxy-archives'), # 519\n # Proxyb_net(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 857\n # Proxz_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n # max_conn=2), # 443\n # Proxynova_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 818\n # _50kproxies_com(), # 822\n # Free_proxy_cz(), # 420\n]" }, { "identifier": "Provider", "path": "proxyhub/providers.py", "snippet": "class Provider:\n \"\"\"Proxy provider.\n\n Provider - a website that publish free public proxy lists.\n\n :param str url: Url of page where to find proxies\n :param tuple proto:\n (optional) List of the types (protocols) that may be supported\n by 
proxies returned by the provider. Then used as :attr:`Proxy.types`\n :param int max_conn:\n (optional) The maximum number of concurrent connections on the provider\n :param int max_tries:\n (optional) The maximum number of attempts to receive response\n :param int timeout:\n (optional) Timeout of a request in seconds\n \"\"\"\n\n _pattern = IPPortPatternGlobal\n\n def __init__(\n self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None\n ):\n if url:\n self.domain = urlparse(url).netloc\n self.url = url\n self.proto = proto\n self._max_tries = max_tries\n self._timeout = timeout\n self._session = None\n self._cookies = {}\n self._proxies = set()\n # concurrent connections on the current provider\n self._sem_provider = asyncio.Semaphore(max_conn)\n self._loop = loop or asyncio.get_event_loop()\n\n @property\n def proxies(self):\n \"\"\"Return all found proxies.\n\n :return:\n Set of tuples with proxy hosts, ports and types (protocols)\n that may be supported (from :attr:`.proto`).\n\n For example:\n {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}\n\n :rtype: set\n \"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, new):\n new = [(host, port, self.proto) for host, port in new if port]\n self._proxies.update(new)\n\n async def get_proxies(self):\n \"\"\"Receive proxies from the provider and return them.\n\n :return: :attr:`.proxies`\n \"\"\"\n log.debug('Try to get proxies from %s' % self.domain)\n\n async with aiohttp.ClientSession(\n headers=get_headers(), cookies=self._cookies, loop=self._loop\n ) as self._session:\n await self._pipe()\n\n log.debug(\n '%d proxies received from %s: %s'\n % (len(self.proxies), self.domain, self.proxies)\n )\n return self.proxies\n\n async def _pipe(self):\n await self._find_on_page(self.url)\n\n async def _find_on_pages(self, urls):\n if not urls:\n return\n tasks = []\n if not isinstance(urls[0], dict):\n urls = set(urls)\n for url in urls:\n if isinstance(url, dict):\n tasks.append(self._find_on_page(**url))\n else:\n tasks.append(self._find_on_page(url))\n await asyncio.gather(*tasks)\n\n async def _find_on_page(self, url, data=None, headers=None, method='GET'):\n page = await self.get(url, data=data, headers=headers, method=method)\n oldcount = len(self.proxies)\n try:\n received = self.find_proxies(page)\n except Exception as e:\n received = []\n log.error(\n 'Error when executing find_proxies.'\n 'Domain: %s; Error: %r' % (self.domain, e)\n )\n self.proxies = received\n added = len(self.proxies) - oldcount\n log.debug(\n '%d(%d) proxies added(received) from %s' % (added, len(received), url)\n )\n\n async def get(self, url, data=None, headers=None, method='GET'):\n for _ in range(self._max_tries):\n page = await self._get(url, data=data, headers=headers, method=method)\n if page:\n break\n return page\n\n async def _get(self, url, data=None, headers=None, method='GET'):\n page = ''\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with self._sem_provider, self._session.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as resp:\n page = await resp.text()\n if resp.status != 200:\n log.debug(\n 'url: %s\\nheaders: %s\\ncookies: %s\\npage:\\n%s'\n % (url, resp.headers, resp.cookies, page)\n )\n raise BadStatusError('Status: %s' % resp.status)\n except (\n UnicodeDecodeError,\n BadStatusError,\n asyncio.TimeoutError,\n aiohttp.ClientOSError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n ) as e:\n page = ''\n log.debug('%s is failed. 
Error: %r;' % (url, e))\n return page\n\n def find_proxies(self, page):\n return self._find_proxies(page)\n\n def _find_proxies(self, page):\n proxies = self._pattern.findall(page)\n return proxies" }, { "identifier": "Proxy", "path": "proxyhub/proxy.py", "snippet": "class Proxy:\n \"\"\"Proxy.\n\n :param str host: IP address of the proxy\n :param int port: Port of the proxy\n :param tuple types:\n (optional) List of types (protocols) which may be supported\n by the proxy and which can be checked to work with the proxy\n :param int timeout:\n (optional) Timeout of a connection and receive a response in seconds\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n\n :raises ValueError: If the host not is IP address, or if the port > 65535\n \"\"\"\n\n @classmethod\n async def create(cls, host, *args, **kwargs):\n \"\"\"Asynchronously create a :class:`Proxy` object.\n\n :param str host: A passed host can be a domain or IP address.\n If the host is a domain, try to resolve it\n :param str *args:\n (optional) Positional arguments that :class:`Proxy` takes\n :param str **kwargs:\n (optional) Keyword arguments that :class:`Proxy` takes\n\n :return: :class:`Proxy` object\n :rtype: proxyhub.Proxy\n\n :raises ResolveError: If could not resolve the host\n :raises ValueError: If the port > 65535\n \"\"\" # noqa: W605\n loop = kwargs.pop('loop', None)\n resolver = kwargs.pop('resolver', Resolver(loop=loop))\n try:\n _host = await resolver.resolve(host)\n self = cls(_host, *args, **kwargs)\n except (ResolveError, ValueError) as e:\n log.error('%s:%s: Error at creating: %s' % (host, args[0], e))\n raise\n return self\n\n def __init__(self, host=None, port=None, types=(), timeout=8, verify_ssl=False):\n self.host = host\n if not Resolver.host_is_ip(self.host):\n raise ValueError(\n 'The host of proxy should be the IP address. '\n 'Try Proxy.create() if the host is a domain'\n )\n\n self.port = int(port)\n if self.port > 65535:\n raise ValueError('The port of proxy cannot be greater than 65535')\n\n self.expected_types = set(types) & {\n 'HTTP',\n 'HTTPS',\n 'CONNECT:80',\n 'CONNECT:25',\n 'SOCKS4',\n 'SOCKS5',\n }\n self._timeout = timeout\n self._ssl_context = True if verify_ssl else _ssl._create_unverified_context()\n self._types = {}\n self._is_working = False\n self.stat = {'requests': 0, 'errors': Counter()}\n self._ngtr = None\n self._geo = Resolver.get_ip_info(self.host)\n self._log = []\n self._runtimes = []\n self._schemes = ()\n self._closed = True\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n\n def __repr__(self):\n \"\"\"Class representation\n e.g. 
<Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>\n \"\"\"\n tpinfo = []\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n s = '{tp}: {lvl}' if lvl else '{tp}'\n s = s.format(tp=tp, lvl=lvl)\n tpinfo.append(s)\n tpinfo = ', '.join(tpinfo)\n return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(\n code=self._geo.code,\n types=tpinfo,\n host=self.host,\n port=self.port,\n avg=self.avg_resp_time,\n )\n\n @property\n def types(self):\n \"\"\"Types (protocols) supported by the proxy.\n\n | Where key is type, value is level of anonymity\n (only for HTTP, for other types level always is None).\n | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n | Available levels: Transparent, Anonymous, High.\n\n :rtype: dict\n \"\"\"\n return self._types\n\n @property\n def is_working(self):\n \"\"\"True if the proxy is working, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._is_working\n\n @is_working.setter\n def is_working(self, val):\n self._is_working = val\n\n @property\n def writer(self):\n return self._writer.get('ssl') or self._writer.get('conn')\n\n @property\n def reader(self):\n return self._reader.get('ssl') or self._reader.get('conn')\n\n @property\n def priority(self):\n return (self.error_rate, self.avg_resp_time)\n\n @property\n def error_rate(self):\n \"\"\"Error rate: from 0 to 1.\n\n For example: 0.7 = 70% requests ends with error.\n\n :rtype: float\n\n .. versionadded:: 0.2.0\n \"\"\"\n if not self.stat['requests']:\n return 0\n return round(sum(self.stat['errors'].values()) / self.stat['requests'], 2)\n\n @property\n def schemes(self):\n \"\"\"Return supported schemes.\"\"\"\n if not self._schemes:\n _schemes = []\n if self.types.keys() & _HTTP_PROTOS:\n _schemes.append('HTTP')\n if self.types.keys() & _HTTPS_PROTOS:\n _schemes.append('HTTPS')\n self._schemes = tuple(_schemes)\n return self._schemes\n\n @property\n def avg_resp_time(self):\n \"\"\"The average connection/response time.\n\n :rtype: float\n \"\"\"\n if not self._runtimes:\n return 0\n return round(sum(self._runtimes) / len(self._runtimes), 2)\n\n @property\n def avgRespTime(self):\n \"\"\"\n .. deprecated:: 2.0\n Use :attr:`avg_resp_time` instead.\n \"\"\"\n warnings.warn(\n '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.',\n DeprecationWarning,\n )\n return self.avg_resp_time\n\n @property\n def geo(self):\n \"\"\"Geo information about IP address of the proxy.\n\n :return:\n Named tuple with fields:\n * ``code`` - ISO country code\n * ``name`` - Full name of country\n * ``region_code`` - ISO region code\n * ``region_name`` - Full name of region\n * ``city_name`` - Full name of city\n :rtype: collections.namedtuple\n\n .. 
versionchanged:: 0.2.0\n In previous versions return a dictionary, now named tuple.\n \"\"\"\n return self._geo\n\n @property\n def ngtr(self):\n return self._ngtr\n\n @ngtr.setter\n def ngtr(self, proto):\n self._ngtr = NGTRS[proto](self)\n\n def as_json(self):\n \"\"\"Return the proxy's properties in JSON format.\n\n :rtype: dict\n \"\"\"\n info = {\n 'host': self.host,\n 'port': self.port,\n 'geo': {\n 'country': {'code': self._geo.code, 'name': self._geo.name},\n 'region': {\n 'code': self._geo.region_code,\n 'name': self._geo.region_name,\n },\n 'city': self._geo.city_name,\n },\n 'types': [],\n 'avg_resp_time': self.avg_resp_time,\n 'error_rate': self.error_rate,\n }\n\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n info['types'].append({'type': tp, 'level': lvl or ''})\n return info\n\n def as_text(self):\n \"\"\"\n Return proxy as host:port\n\n :rtype: str\n \"\"\"\n return \"{}:{}\\n\".format(self.host, self.port)\n\n def log(self, msg, stime=0, err=None):\n ngtr = self.ngtr.name if self.ngtr else 'INFO'\n runtime = time.time() - stime if stime else 0\n log.debug(\n '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(\n h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime\n )\n )\n trunc = '...' if len(msg) > 58 else ''\n msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)\n self._log.append((ngtr, msg, runtime))\n if err:\n self.stat['errors'][err.errmsg] += 1\n if runtime and 'timeout' not in msg:\n self._runtimes.append(runtime)\n\n def get_log(self):\n \"\"\"Proxy log.\n\n :return: The proxy log in format: (negotaitor, msg, runtime)\n :rtype: tuple\n\n .. versionadded:: 0.2.0\n \"\"\"\n return self._log\n\n async def connect(self, ssl=False):\n err = None\n msg = '%s' % 'SSL: ' if ssl else ''\n stime = time.time()\n self.log('%sInitial connection' % msg)\n try:\n if ssl:\n _type = 'ssl'\n sock = self._writer['conn'].get_extra_info('socket')\n params = {\n 'ssl': self._ssl_context,\n 'sock': sock,\n 'server_hostname': self.host,\n }\n else:\n _type = 'conn'\n params = {'host': self.host, 'port': self.port}\n self._reader[_type], self._writer[_type] = await asyncio.wait_for(\n asyncio.open_connection(**params), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg += 'Connection: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionRefusedError, OSError, _ssl.SSLError):\n msg += 'Connection: failed'\n err = ProxyConnError(msg)\n raise err\n # except asyncio.CancelledError:\n # log.debug('Cancelled in proxy.connect()')\n # raise ProxyConnError()\n else:\n msg += 'Connection: success'\n self._closed = False\n finally:\n self.stat['requests'] += 1\n self.log(msg, stime, err=err)\n\n def close(self):\n if self._closed:\n return\n self._closed = True\n if self.writer:\n # try:\n self.writer.close()\n # except RuntimeError:\n # print('Try proxy.close() when loop is closed:',\n # asyncio.get_event_loop()._closed)\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n self.log('Connection: closed')\n self._ngtr = None\n\n async def send(self, req):\n msg, err = '', None\n _req = req.encode() if not isinstance(req, bytes) else req\n try:\n self.writer.write(_req)\n await self.writer.drain()\n except ConnectionResetError:\n msg = '; Sending: failed'\n err = ProxySendError(msg)\n raise err\n finally:\n self.log('Request: %s%s' % (req, msg), err=err)\n\n async def recv(self, length=0, head_only=False):\n resp, msg, err = b'', '', None\n stime = 
time.time()\n try:\n resp = await asyncio.wait_for(\n self._recv(length, head_only), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg = 'Received: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionResetError, OSError):\n msg = 'Received: failed' # (connection is reset by the peer)\n err = ProxyRecvError(msg)\n raise err\n else:\n msg = 'Received: %s bytes' % len(resp)\n if not resp:\n err = ProxyEmptyRecvError(msg)\n raise err\n finally:\n if resp:\n msg += ': %s' % resp[:12]\n self.log(msg, stime, err=err)\n return resp\n\n async def _recv(self, length=0, head_only=False):\n resp = b''\n if length:\n try:\n resp = await self.reader.readexactly(length)\n except asyncio.IncompleteReadError as e:\n resp = e.partial\n else:\n body_size, body_recv, chunked = 0, 0, None\n while not self.reader.at_eof():\n line = await self.reader.readline()\n resp += line\n if body_size:\n body_recv += len(line)\n if body_recv >= body_size:\n break\n elif chunked and line == b'0\\r\\n':\n break\n elif not body_size and line == b'\\r\\n':\n if head_only:\n break\n headers = parse_headers(resp)\n body_size = int(headers.get('Content-Length', 0))\n if not body_size:\n chunked = headers.get('Transfer-Encoding') == 'chunked'\n return resp" }, { "identifier": "Resolver", "path": "proxyhub/resolver.py", "snippet": "class Resolver:\n \"\"\"Async host resolver based on aiodns.\"\"\"\n\n _cached_hosts = {}\n _ip_hosts = [\n 'https://wtfismyip.com/text',\n 'http://api.ipify.org/',\n 'http://ipinfo.io/ip',\n 'http://ipv4.icanhazip.com/',\n 'http://myexternalip.com/raw',\n 'http://ipinfo.io/ip',\n 'http://ifconfig.io/ip',\n ]\n # the list of resolvers will point a copy of original one\n _temp_host = []\n\n def __init__(self, timeout=5, loop=None):\n self._timeout = timeout\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = aiodns.DNSResolver(loop=self._loop)\n\n @staticmethod\n def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n host = '.'.join(f'{int(n)}' for n in host.split('.'))\n ipaddress.IPv4Address(host)\n except (ipaddress.AddressValueError, ValueError):\n return False\n else:\n return True\n\n @staticmethod\n def get_ip_info(ip):\n \"\"\"Return geo information about IP address.\n\n `code` - ISO country code\n `name` - Full name of country\n `region_code` - ISO region code\n `region_name` - Full name of region\n `city_name` - Full name of city\n \"\"\"\n # from pprint import pprint\n try:\n ipInfo = _mmdb_reader.get(ip) or {}\n except (maxminddb.errors.InvalidDatabaseError, ValueError):\n ipInfo = {}\n\n code, name = '--', 'Unknown'\n city_name, region_code, region_name = ('Unknown',) * 3\n if 'country' in ipInfo:\n code = ipInfo['country']['iso_code']\n name = ipInfo['country']['names']['en']\n elif 'continent' in ipInfo:\n code = ipInfo['continent']['code']\n name = ipInfo['continent']['names']['en']\n if 'city' in ipInfo:\n city_name = ipInfo['city']['names']['en']\n if 'subdivisions' in ipInfo:\n region_code = ipInfo['subdivisions'][0]['iso_code']\n region_name = ipInfo['subdivisions'][0]['names']['en']\n return GeoData(code, name, region_code, region_name, city_name)\n\n def _pop_random_ip_host(self):\n host = random.choice(self._temp_host)\n self._temp_host.remove(host)\n return host\n\n async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n # make a copy of original one to temp one\n # so original one will stay no change\n self._temp_host = self._ip_hosts.copy()\n while 
self._temp_host:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n\n async def resolve(self, host, port=80, family=None, qtype='A', logging=True):\n \"\"\"Return resolving IP address(es) from host name.\"\"\"\n if self.host_is_ip(host):\n return host\n\n _host = self._cached_hosts.get(host)\n if _host:\n return _host\n\n resp = await self._resolve(host, qtype)\n\n if resp:\n hosts = [\n {\n 'hostname': host,\n 'host': r.host,\n 'port': port,\n 'family': family,\n 'proto': socket.IPPROTO_IP,\n 'flags': socket.AI_NUMERICHOST,\n }\n for r in resp\n ]\n if family:\n self._cached_hosts[host] = hosts\n else:\n self._cached_hosts[host] = hosts[0]['host']\n if logging:\n log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))\n else:\n if logging:\n log.warning('%s: Could not resolve host' % host)\n return self._cached_hosts.get(host)\n\n async def _resolve(self, host, qtype):\n try:\n resp = await asyncio.wait_for(\n self._resolver.query(host, qtype), timeout=self._timeout\n )\n except (aiodns.error.DNSError, asyncio.TimeoutError):\n raise ResolveError\n else:\n return resp" }, { "identifier": "Server", "path": "proxyhub/server.py", "snippet": "class Server:\n \"\"\"Server distributes incoming requests to a pool of found proxies.\"\"\"\n\n def __init__(\n self,\n host,\n port,\n proxies,\n timeout=8,\n max_tries=3,\n min_queue=5,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n prefer_connect=False,\n http_allowed_codes=None,\n backlog=100,\n loop=None,\n **kwargs,\n ):\n self.host = host\n self.port = int(port)\n self._loop = loop or asyncio.get_event_loop()\n self._timeout = timeout\n self._max_tries = max_tries\n self._backlog = backlog\n self._prefer_connect = prefer_connect\n\n self._server = None\n self._connections = {}\n self._proxy_pool = ProxyPool(\n proxies, min_req_proxy, max_error_rate, max_resp_time, min_queue\n )\n self._resolver = Resolver(loop=self._loop)\n self._http_allowed_codes = http_allowed_codes or []\n\n def start(self):\n\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(self._server.sockets[0].getsockname())\n )\n\n def stop(self):\n if not self._server:\n return\n for conn in self._connections:\n if not conn.done():\n conn.cancel()\n self._server.close()\n if not self._loop.is_running():\n self._loop.run_until_complete(self._server.wait_closed())\n # Time to close the running futures in self._connections\n self._loop.run_until_complete(asyncio.sleep(0.5))\n self._server = None\n self._loop.stop()\n log.info('Server is stopped')\n\n def _accept(self, client_reader, client_writer):\n def _on_completion(f):\n reader, writer = self._connections.pop(f)\n writer.close()\n log.debug('client: %d; closed' % id(client_reader))\n try:\n exc = f.exception()\n except asyncio.CancelledError:\n log.debug('CancelledError in server._handle:_on_completion')\n exc = None\n if exc:\n if isinstance(exc, NoProxyError):\n self.stop()\n else:\n raise exc\n\n f = 
asyncio.ensure_future(self._handle(client_reader, client_writer))\n f.add_done_callback(_on_completion)\n self._connections[f] = (client_reader, client_writer)\n\n async def _handle(self, client_reader, client_writer):\n log.debug(\n 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),)\n )\n\n request, headers = await self._parse_request(client_reader)\n scheme = self._identify_scheme(headers)\n client = id(client_reader)\n log.debug(\n 'client: %d; request: %s; headers: %s; scheme: %s'\n % (client, request, headers, scheme)\n )\n\n # API for controlling proxyhub2\n if headers['Host'] == 'proxycontrol':\n _api, _operation, _params = headers['Path'].split('/', 5)[3:]\n if _api == 'api':\n if _operation == 'remove':\n proxy_host, proxy_port = _params.split(':', 1)\n self._proxy_pool.remove(proxy_host, int(proxy_port))\n log.debug(\n 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s'\n % (client, request, headers, scheme, proxy_host, proxy_port)\n )\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n elif _operation == 'history':\n query_type, url = _params.split(':', 1)\n if query_type == 'url':\n previous_proxy = history.get(\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{url}\"\n )\n if previous_proxy is None:\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n else:\n previous_proxy_bytestring = (\n '{\"proxy\": \"%s\"}' % previous_proxy\n ).encode()\n client_writer.write(b'HTTP/1.1 200 OK\\r\\n')\n client_writer.write(b'Content-Type: application/json\\r\\n')\n client_writer.write(\n f\"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\\r\\n\"\n )\n client_writer.write(b'Access-Control-Allow-Origin: *\\r\\n')\n client_writer.write(\n b'Access-Control-Allow-Credentials: true\\r\\n\\r\\n'\n )\n\n client_writer.write(previous_proxy_bytestring + b'\\r\\n')\n await client_writer.drain()\n return\n\n for attempt in range(self._max_tries):\n stime, err = 0, None\n proxy = await self._proxy_pool.get(scheme)\n proto = self._choice_proto(proxy, scheme)\n log.debug(\n 'client: %d; attempt: %d; proxy: %s; proto: %s'\n % (client, attempt, proxy, proto)\n )\n\n try:\n await proxy.connect()\n\n if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):\n host = headers.get('Host')\n port = headers.get('Port', 80)\n try:\n ip = await self._resolver.resolve(host)\n except ResolveError:\n return\n proxy.ngtr = proto\n await proxy.ngtr.negotiate(host=host, port=port, ip=ip)\n if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):\n client_writer.write(CONNECTED)\n await client_writer.drain()\n else: # HTTP\n await proxy.send(request)\n else: # proto: HTTP & HTTPS\n await proxy.send(request)\n\n history[\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{headers['Path']}\"\n ] = (proxy.host + ':' + str(proxy.port))\n inject_resp_header = {\n 'headers': {'X-Proxy-Info': proxy.host + ':' + str(proxy.port)}\n }\n\n stime = time.time()\n stream = [\n asyncio.ensure_future(\n self._stream(reader=client_reader, writer=proxy.writer)\n ),\n asyncio.ensure_future(\n self._stream(\n reader=proxy.reader,\n writer=client_writer,\n scheme=scheme,\n inject=inject_resp_header,\n )\n ),\n ]\n await asyncio.gather(*stream, loop=self._loop)\n except asyncio.CancelledError:\n log.debug('Cancelled in server._handle')\n break\n except (\n ProxyTimeoutError,\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n 
ProxyEmptyRecvError,\n                BadStatusError,\n                BadResponseError,\n            ) as e:\n                log.debug('client: %d; error: %r' % (client, e))\n                continue\n            except ErrorOnStream as e:\n                log.debug(\n                    'client: %d; error: %r; EOF: %s'\n                    % (client, e, client_reader.at_eof())\n                )\n                for task in stream:\n                    if not task.done():\n                        task.cancel()\n                if client_reader.at_eof() and 'Timeout' in repr(e):\n                    # Proxy may not be able to receive EOF and a TimeoutError will\n                    # be raised, but all the data has already been returned\n                    # successfully, so do not count this as an error of the proxy\n                    break\n                err = e\n                if scheme == 'HTTPS': # SSL Handshake probably failed\n                    break\n            else:\n                break\n            finally:\n                proxy.log(request.decode(), stime, err=err)\n                proxy.close()\n                self._proxy_pool.put(proxy)\n\n    async def _parse_request(self, reader, length=65536):\n        request = await reader.read(length)\n        headers = parse_headers(request)\n        if headers['Method'] == 'POST' and request.endswith(b'\\r\\n\\r\\n'):\n            # For aiohttp. POST data returns on second reading\n            request += await reader.read(length)\n        return request, headers\n\n    def _identify_scheme(self, headers):\n        if headers['Method'] == 'CONNECT':\n            return 'HTTPS'\n        else:\n            return 'HTTP'\n\n    def _choice_proto(self, proxy, scheme):\n        if scheme == 'HTTP':\n            if self._prefer_connect and ('CONNECT:80' in proxy.types):\n                proto = 'CONNECT:80'\n            else:\n                relevant = {\n                    'HTTP',\n                    'CONNECT:80',\n                    'SOCKS4',\n                    'SOCKS5',\n                } & proxy.types.keys()\n                proto = relevant.pop()\n        else: # HTTPS\n            relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys()\n            proto = relevant.pop()\n        return proto\n\n    async def _stream(self, reader, writer, length=65536, scheme=None, inject=None):\n        checked = False\n\n        try:\n            while not reader.at_eof():\n                data = await asyncio.wait_for(reader.read(length), self._timeout)\n                if not data:\n                    writer.close()\n                    break\n                elif scheme and not checked:\n                    self._check_response(data, scheme)\n\n                    if inject.get('headers') is not None and len(inject['headers']) > 0:\n                        data = self._inject_headers(data, scheme, inject['headers'])\n\n                    checked = True\n\n                writer.write(data)\n                await writer.drain()\n\n        except (\n            asyncio.TimeoutError,\n            ConnectionResetError,\n            OSError,\n            ProxyRecvError,\n            BadStatusError,\n            BadResponseError,\n        ) as e:\n            raise ErrorOnStream(e)\n\n    def _check_response(self, data, scheme):\n        if scheme == 'HTTP' and self._http_allowed_codes:\n            line = data.split(b'\\r\\n', 1)[0].decode()\n            try:\n                header = parse_status_line(line)\n            except BadStatusLine:\n                raise BadResponseError\n            if header['Status'] not in self._http_allowed_codes:\n                raise BadStatusError(\n                    '%r not in %r' % (header['Status'], self._http_allowed_codes)\n                )\n\n    def _inject_headers(self, data, scheme, headers):\n        custom_lines = []\n\n        if scheme == 'HTTP' or scheme == 'HTTPS':\n            status_line, rest_lines = data.split(b'\\r\\n', 1)\n            custom_lines.append(status_line)\n\n            for k, v in headers.items():\n                custom_lines.append(('%s: %s' % (k, v)).encode())\n\n            custom_lines.append(rest_lines)\n            data = b'\\r\\n'.join(custom_lines)\n\n        return data" }, { "identifier": "IPPortPatternLine", "path": "proxyhub/utils.py", "snippet": "BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ndef get_headers(rv=False):\ndef get_all_ip(page):\ndef get_status_code(resp, start=9, stop=12):\ndef parse_status_line(line):\ndef parse_headers(headers):\ndef update_geoip_db():" } ]
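One easy-to-miss detail in the `Server` snippet above: requests whose `Host` header is the magic value `proxycontrol` are answered by a small in-band control API (`/api/remove/<host:port>` evicts a proxy and returns `204 No Content`; `/api/history/url:<url>` reports the proxy last used for a URL). A minimal sketch of calling it; `127.0.0.1:8888` matches the `serve()` defaults, and the proxy address being removed is hypothetical:

```python
import urllib.request

# Route the request *through* the local proxyhub server so that the magic
# 'proxycontrol' host is intercepted by Server._handle instead of resolved.
opener = urllib.request.build_opener(
    urllib.request.ProxyHandler({'http': 'http://127.0.0.1:8888'})
)

# Evict a (hypothetical) dead proxy from the pool; expects HTTP 204.
resp = opener.open('http://proxycontrol/api/remove/1.2.3.4:8080')
print(resp.status)
```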
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
14,879
strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. 
versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str):
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). 
The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str):
data = IPPortPatternLine.findall(data)
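This gold line completes the `str` branch of `Broker._load()`: raw text is run through the `IPPortPatternLine` regex to extract `host:port` pairs. For orientation, a typical driver for this API in the ProxyBroker lineage is sketched below; the package-root `Broker` export and the terminating `None` sentinel on the queue are assumptions carried over from that lineage, not facts shown in the record:

```python
import asyncio

from proxyhub import Broker  # assumed re-export from the package root

async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:  # assumed end-of-search sentinel
            break
        print('Found proxy: %s' % proxy)

proxies = asyncio.Queue()
broker = Broker(proxies)
tasks = asyncio.gather(
    broker.find(types=['HTTP', 'HTTPS'], limit=10),
    show(proxies),
)
# Broker captures asyncio.get_event_loop() in __init__, so drive that same
# loop here rather than creating a fresh one with asyncio.run().
asyncio.get_event_loop().run_until_complete(tasks)
```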
7
2023-11-05 13:28:57+00:00
24k
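Rounding out this record: the `Proxy` context snippet only accepts IP literals in its constructor and routes domain names through the async factory `Proxy.create()`. A short usage sketch; the addresses are placeholders, and constructing a `Proxy` assumes the package's bundled GeoIP database is available for the geo lookup:

```python
import asyncio

from proxyhub.proxy import Proxy

async def main():
    # Direct construction requires an IP literal; a domain raises ValueError.
    p = Proxy('93.184.216.34', 8080, types=('HTTP',))
    print(p.as_text().strip())  # '93.184.216.34:8080'

    # Proxy.create() resolves a domain first and may raise ResolveError.
    p2 = await Proxy.create('example.com', 8080)
    print(p2.host, p2.port)

asyncio.run(main())
```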
TheFunny/ArisuAutoSweeper
module/webui/app.py
[ { "identifier": "AzurLaneConfig", "path": "module/config/config.py", "snippet": "class AzurLaneConfig(ConfigUpdater, ManualConfig, GeneratedConfig, ConfigWatcher):\n stop_event: threading.Event = None\n bound = {}\n\n # Class property\n is_hoarding_task = True\n\n def __setattr__(self, key, value):\n if key in self.bound:\n path = self.bound[key]\n self.modified[path] = value\n if self.auto_update:\n self.update()\n else:\n super().__setattr__(key, value)\n\n def __init__(self, config_name, task=None):\n logger.attr(\"Lang\", self.LANG)\n # This will read ./config/<config_name>.json\n self.config_name = config_name\n # Raw json data in yaml file.\n self.data = {}\n # Modified arguments. Key: Argument path in yaml file. Value: Modified value.\n # All variable modifications will be record here and saved in method `save()`.\n self.modified = {}\n # Key: Argument name in GeneratedConfig. Value: Path in `data`.\n self.bound = {}\n # If write after every variable modification.\n self.auto_update = True\n # Force override variables\n # Key: Argument name in GeneratedConfig. Value: Modified value.\n self.overridden = {}\n # Scheduler queue, will be updated in `get_next_task()`, list of Function objects\n # pending_task: Run time has been reached, but haven't been run due to task scheduling.\n # waiting_task: Run time haven't been reached, wait needed.\n self.pending_task = []\n self.waiting_task = []\n # Task to run and bind.\n # Task means the name of the function to run in AzurLaneAutoScript class.\n self.task: Function\n # Template config is used for dev tools\n self.is_template_config = config_name.startswith(\"template\")\n\n if self.is_template_config:\n # For dev tools\n logger.info(\"Using template config, which is read only\")\n self.auto_update = False\n self.task = name_to_function(\"template\")\n else:\n self.load()\n if task is None:\n # Bind `Alas` by default which includes emulator settings.\n task = name_to_function(\"Alas\")\n else:\n # Bind a specific task for debug purpose.\n task = name_to_function(task)\n self.bind(task)\n self.task = task\n self.save()\n\n def load(self):\n self.data = self.read_file(self.config_name)\n self.config_override()\n\n for path, value in self.modified.items():\n deep_set(self.data, keys=path, value=value)\n\n def bind(self, func, func_list=None):\n \"\"\"\n Args:\n func (str, Function): Function to run\n func_list (set): Set of tasks to be bound\n \"\"\"\n if func_list is None:\n func_list = [\"Alas\"]\n if isinstance(func, Function):\n func = func.command\n func_list.append(func)\n logger.info(f\"Bind task {func_list}\")\n\n # Bind arguments\n visited = set()\n self.bound.clear()\n for func in func_list:\n func_data = self.data.get(func, {})\n for group, group_data in func_data.items():\n for arg, value in group_data.items():\n path = f\"{group}.{arg}\"\n if path in visited:\n continue\n arg = path_to_arg(path)\n super().__setattr__(arg, value)\n self.bound[arg] = f\"{func}.{path}\"\n visited.add(path)\n\n # Override arguments\n for arg, value in self.overridden.items():\n super().__setattr__(arg, value)\n\n @property\n def hoarding(self):\n minutes = int(\n deep_get(\n self.data, keys=\"Alas.Optimization.TaskHoardingDuration\", default=0\n )\n )\n return timedelta(minutes=max(minutes, 0))\n\n @property\n def close_game(self):\n return deep_get(\n self.data, keys=\"Alas.Optimization.CloseGameDuringWait\", default=False\n )\n\n @cached_property\n def stored(self) -> StoredGenerated:\n stored = StoredGenerated()\n # Bind config\n for _, value in 
iter_attribute(stored):\n value._bind(self)\n del_cached_property(value, '_stored')\n return stored\n\n def get_next_task(self):\n \"\"\"\n Calculate tasks, set pending_task and waiting_task\n \"\"\"\n pending = []\n waiting = []\n error = []\n now = datetime.now()\n if AzurLaneConfig.is_hoarding_task:\n now -= self.hoarding\n for func in self.data.values():\n func = Function(func)\n if not func.enable:\n continue\n if not isinstance(func.next_run, datetime):\n error.append(func)\n elif func.next_run < now:\n pending.append(func)\n else:\n waiting.append(func)\n\n f = Filter(regex=r\"(.*)\", attr=[\"command\"])\n f.load(self.SCHEDULER_PRIORITY)\n if pending:\n pending = f.apply(pending)\n if waiting:\n waiting = f.apply(waiting)\n waiting = sorted(waiting, key=operator.attrgetter(\"next_run\"))\n if error:\n pending = error + pending\n\n self.pending_task = pending\n self.waiting_task = waiting\n\n def get_next(self):\n \"\"\"\n Returns:\n Function: Command to run\n \"\"\"\n self.get_next_task()\n\n if self.pending_task:\n AzurLaneConfig.is_hoarding_task = False\n logger.info(f\"Pending tasks: {[f.command for f in self.pending_task]}\")\n task = self.pending_task[0]\n logger.attr(\"Task\", task)\n return task\n else:\n AzurLaneConfig.is_hoarding_task = True\n\n if self.waiting_task:\n logger.info(\"No task pending\")\n task = copy.deepcopy(self.waiting_task[0])\n task.next_run = (task.next_run + self.hoarding).replace(microsecond=0)\n logger.attr(\"Task\", task)\n return task\n else:\n logger.critical(\"No task waiting or pending\")\n logger.critical(\"Please enable at least one task\")\n raise RequestHumanTakeover\n\n def save(self, mod_name='alas'):\n if not self.modified:\n return False\n\n for path, value in self.modified.items():\n deep_set(self.data, keys=path, value=value)\n\n logger.info(\n f\"Save config {filepath_config(self.config_name, mod_name)}, {dict_to_kv(self.modified)}\"\n )\n # Don't use self.modified = {}, that will create a new object.\n self.modified.clear()\n del_cached_property(self, 'stored')\n self.write_file(self.config_name, data=self.data)\n\n def update(self):\n self.load()\n self.config_override()\n self.bind(self.task)\n self.save()\n\n def config_override(self):\n now = datetime.now().replace(microsecond=0)\n limited = set()\n\n def limit_next_run(tasks, limit):\n for task in tasks:\n if task in limited:\n continue\n limited.add(task)\n next_run = deep_get(\n self.data, keys=f\"{task}.Scheduler.NextRun\", default=None\n )\n if isinstance(next_run, datetime) and next_run > limit:\n deep_set(self.data, keys=f\"{task}.Scheduler.NextRun\", value=now)\n\n limit_next_run(['BattlePass'], limit=now + timedelta(days=31, seconds=-1))\n limit_next_run(self.args.keys(), limit=now + timedelta(hours=24, seconds=-1))\n\n def override(self, **kwargs):\n \"\"\"\n Override anything you want.\n Variables stall remain overridden even config is reloaded from yaml file.\n Note that this method is irreversible.\n \"\"\"\n for arg, value in kwargs.items():\n self.overridden[arg] = value\n super().__setattr__(arg, value)\n\n def set_record(self, **kwargs):\n \"\"\"\n Args:\n **kwargs: For example, `Emotion1_Value=150`\n will set `Emotion1_Value=150` and `Emotion1_Record=now()`\n \"\"\"\n with self.multi_set():\n for arg, value in kwargs.items():\n record = arg.replace(\"Value\", \"Record\")\n self.__setattr__(arg, value)\n self.__setattr__(record, datetime.now().replace(microsecond=0))\n\n def multi_set(self):\n \"\"\"\n Set multiple arguments but save once.\n\n Examples:\n with 
self.config.multi_set():\n self.config.foo1 = 1\n self.config.foo2 = 2\n \"\"\"\n return MultiSetWrapper(main=self)\n\n def cross_get(self, keys, default=None):\n \"\"\"\n Get configs from other tasks.\n\n Args:\n keys (str, list[str]): Such as `{task}.Scheduler.Enable`\n default:\n\n Returns:\n Any:\n \"\"\"\n return deep_get(self.data, keys=keys, default=default)\n\n def cross_set(self, keys, value):\n \"\"\"\n Set configs to other tasks.\n\n Args:\n keys (str, list[str]): Such as `{task}.Scheduler.Enable`\n value (Any):\n\n Returns:\n Any:\n \"\"\"\n self.modified[keys] = value\n if self.auto_update:\n self.update()\n\n def task_delay(self, success=None, server_update=None, target=None, minute=None, task=None):\n \"\"\"\n Set Scheduler.NextRun\n Should set at least one arguments.\n If multiple arguments are set, use the nearest.\n\n Args:\n success (bool):\n If True, delay Scheduler.SuccessInterval\n If False, delay Scheduler.FailureInterval\n server_update (bool, list, str):\n If True, delay to nearest Scheduler.ServerUpdate\n If type is list or str, delay to such server update\n target (datetime.datetime, str, list):\n Delay to such time.\n minute (int, float, tuple):\n Delay several minutes.\n task (str):\n Set across task. None for current task.\n \"\"\"\n\n def ensure_delta(delay):\n return timedelta(seconds=int(ensure_time(delay, precision=3) * 60))\n\n run = []\n if success is not None:\n interval = (\n 120\n if success\n else 30\n )\n run.append(datetime.now() + ensure_delta(interval))\n if server_update is not None:\n if server_update is True:\n server_update = self.Scheduler_ServerUpdate\n run.append(get_server_next_update(server_update))\n if target is not None:\n target = [target] if not isinstance(target, list) else target\n target = nearest_future(target)\n run.append(target)\n if minute is not None:\n run.append(datetime.now() + ensure_delta(minute))\n\n if len(run):\n run = min(run).replace(microsecond=0)\n kv = dict_to_kv(\n {\n \"success\": success,\n \"server_update\": server_update,\n \"target\": target,\n \"minute\": minute,\n },\n allow_none=False,\n )\n if task is None:\n task = self.task.command\n logger.info(f\"Delay task `{task}` to {run} ({kv})\")\n self.modified[f'{task}.Scheduler.NextRun'] = run\n self.update()\n else:\n raise ScriptError(\n \"Missing argument in delay_next_run, should set at least one\"\n )\n\n def task_call(self, task, force_call=True):\n \"\"\"\n Call another task to run.\n\n That task will run when current task finished.\n But it might not be run because:\n - Other tasks should run first according to SCHEDULER_PRIORITY\n - Task is disabled by user\n\n Args:\n task (str): Task name to call, such as `Restart`\n force_call (bool):\n\n Returns:\n bool: If called.\n \"\"\"\n if deep_get(self.data, keys=f\"{task}.Scheduler.NextRun\", default=None) is None:\n raise ScriptError(f\"Task to call: `{task}` does not exist in user config\")\n\n if force_call or self.is_task_enabled(task):\n logger.info(f\"Task call: {task}\")\n self.modified[f\"{task}.Scheduler.NextRun\"] = datetime.now().replace(\n microsecond=0\n )\n self.modified[f\"{task}.Scheduler.Enable\"] = True\n if self.auto_update:\n self.update()\n return True\n else:\n logger.info(f\"Task call: {task} (skipped because disabled by user)\")\n return False\n\n @staticmethod\n def task_stop(message=\"\"):\n \"\"\"\n Stop current task.\n\n Raises:\n TaskEnd:\n \"\"\"\n if message:\n raise TaskEnd(message)\n else:\n raise TaskEnd\n\n def task_switched(self):\n \"\"\"\n Check if needs to switch 
task.\n\n Raises:\n bool: If task switched\n \"\"\"\n # Update event\n if self.stop_event is not None:\n if self.stop_event.is_set():\n return True\n prev = self.task\n self.load()\n new = self.get_next()\n if prev == new:\n logger.info(f\"Continue task `{new}`\")\n return False\n else:\n logger.info(f\"Switch task `{prev}` to `{new}`\")\n return True\n\n def check_task_switch(self, message=\"\"):\n \"\"\"\n Stop current task when task switched.\n\n Raises:\n TaskEnd:\n \"\"\"\n if self.task_switched():\n self.task_stop(message=message)\n\n def is_task_enabled(self, task):\n return bool(self.cross_get(keys=[task, 'Scheduler', 'Enable'], default=False))\n\n def update_daily_quests(self):\n \"\"\"\n Raises:\n TaskEnd: Call task `DailyQuest` and stop current task\n \"\"\"\n if self.stored.DailyActivity.is_expired():\n logger.info('DailyActivity expired, call task to update')\n self.task_call('DailyQuest')\n self.task_stop()\n if self.stored.DailyQuest.is_expired():\n logger.info('DailyQuest expired, call task to update')\n self.task_call('DailyQuest')\n self.task_stop()\n\n @property\n def DEVICE_SCREENSHOT_METHOD(self):\n return self.Emulator_ScreenshotMethod\n\n @property\n def DEVICE_CONTROL_METHOD(self):\n return self.Emulator_ControlMethod\n\n def temporary(self, **kwargs):\n \"\"\"\n Cover some settings, and recover later.\n\n Usage:\n backup = self.config.cover(ENABLE_DAILY_REWARD=False)\n # do_something()\n backup.recover()\n\n Args:\n **kwargs:\n\n Returns:\n ConfigBackup:\n \"\"\"\n backup = ConfigBackup(config=self)\n backup.cover(**kwargs)\n return backup" }, { "identifier": "Function", "path": "module/config/config.py", "snippet": "class Function:\n def __init__(self, data):\n self.enable = deep_get(data, keys=\"Scheduler.Enable\", default=False)\n self.command = deep_get(data, keys=\"Scheduler.Command\", default=\"Unknown\")\n self.next_run = deep_get(data, keys=\"Scheduler.NextRun\", default=DEFAULT_TIME)\n\n def __str__(self):\n enable = \"Enable\" if self.enable else \"Disable\"\n return f\"{self.command} ({enable}, {str(self.next_run)})\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n if not isinstance(other, Function):\n return False\n\n if self.command == other.command and self.next_run == other.next_run:\n return True\n else:\n return False" }, { "identifier": "alas_instance", "path": "module/config/utils.py", "snippet": "def alas_instance():\n \"\"\"\n Returns:\n list[str]: Name of all Alas instances, except `template`.\n \"\"\"\n out = []\n for file in os.listdir('./config'):\n name, extension = os.path.splitext(file)\n config_name, mod_name = os.path.splitext(name)\n mod_name = mod_name[1:]\n if name != 'template' and extension == '.json' and mod_name == '':\n out.append(name)\n\n # out.extend(mod_instance())\n\n if not len(out):\n out = ['aas']\n\n return out" }, { "identifier": "alas_template", "path": "module/config/utils.py", "snippet": "def alas_template():\n \"\"\"\n Returns:\n list[str]: Name of all Alas instances, except `template`.\n \"\"\"\n out = []\n for file in os.listdir('./config'):\n name, extension = os.path.splitext(file)\n if name == 'template' and extension == '.json':\n out.append(f'{name}-aas')\n\n # out.extend(mod_template())\n\n return out" }, { "identifier": "deep_get", "path": "module/config/utils.py", "snippet": "def deep_get(d, keys, default=None):\n \"\"\"\n Get values in dictionary safely.\n https://stackoverflow.com/questions/25833613/safe-method-to-get-value-of-nested-dictionary\n\n Args:\n d (dict):\n keys (str, list): Such as 
`Scheduler.NextRun.value`\n default: Default return if key not found.\n\n Returns:\n\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if d is None:\n return default\n if not keys:\n return d\n return deep_get(d.get(keys[0]), keys[1:], default)" }, { "identifier": "deep_iter", "path": "module/config/utils.py", "snippet": "def deep_iter(data, depth=0, current_depth=1):\n \"\"\"\n Iter a dictionary safely.\n\n Args:\n data (dict):\n depth (int): Maximum depth to iter\n current_depth (int):\n\n Returns:\n list: Key path\n Any:\n \"\"\"\n if isinstance(data, dict) \\\n and (depth and current_depth <= depth):\n for key, value in data.items():\n for child_path, child_value in deep_iter(value, depth=depth, current_depth=current_depth + 1):\n yield [key] + child_path, child_value\n else:\n yield [], data" }, { "identifier": "deep_set", "path": "module/config/utils.py", "snippet": "def deep_set(d, keys, value):\n \"\"\"\n Set value into dictionary safely, imitating deep_get().\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if not keys:\n return value\n if not isinstance(d, dict):\n d = {}\n d[keys[0]] = deep_set(d.get(keys[0], {}), keys[1:], value)\n return d" }, { "identifier": "dict_to_kv", "path": "module/config/utils.py", "snippet": "def dict_to_kv(dictionary, allow_none=True):\n \"\"\"\n Args:\n dictionary: Such as `{'path': 'Scheduler.ServerUpdate', 'value': True}`\n allow_none (bool):\n\n Returns:\n str: Such as `path='Scheduler.ServerUpdate', value=True`\n \"\"\"\n return ', '.join([f'{k}={repr(v)}' for k, v in dictionary.items() if allow_none or v is not None])" }, { "identifier": "filepath_args", "path": "module/config/utils.py", "snippet": "def filepath_args(filename='args', mod_name='alas'):\n return f'./module/config/argument/{filename}.json'" }, { "identifier": "filepath_config", "path": "module/config/utils.py", "snippet": "def filepath_config(filename, mod_name='alas'):\n if mod_name == 'alas':\n return os.path.join('./config', f'{filename}.json')\n else:\n return os.path.join('./config', f'{filename}.{mod_name}.json')" }, { "identifier": "read_file", "path": "module/config/utils.py", "snippet": "def read_file(file):\n \"\"\"\n Read a file, support both .yaml and .json format.\n Return empty dict if file not exists.\n\n Args:\n file (str):\n\n Returns:\n dict, list:\n \"\"\"\n folder = os.path.dirname(file)\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n if not os.path.exists(file):\n return {}\n\n _, ext = os.path.splitext(file)\n lock = FileLock(f\"{file}.lock\")\n with lock:\n print(f'read: {file}')\n if ext == '.yaml':\n with open(file, mode='r', encoding='utf-8') as f:\n s = f.read()\n data = list(yaml.safe_load_all(s))\n if len(data) == 1:\n data = data[0]\n if not data:\n data = {}\n return data\n elif ext == '.json':\n with open(file, mode='r', encoding='utf-8') as f:\n s = f.read()\n return json.loads(s)\n else:\n print(f'Unsupported config file extension: {ext}')\n return {}" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", 
end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" }, { "identifier": "Frame", "path": "module/webui/base.py", "snippet": "class Frame(Base):\n def __init__(self) -> None:\n super().__init__()\n self.page = \"Home\"\n\n def init_aside(self, expand_menu: bool = True, name: str = None) -> None:\n \"\"\"\n Call this in aside button callback function.\n Args:\n expand_menu: expand menu\n name: button name(label) to be highlight\n \"\"\"\n self.visible = True\n self.scope_clear()\n self.task_handler.remove_pending_task()\n clear(\"menu\")\n if expand_menu:\n self.expand_menu()\n if name:\n self.active_button(\"aside\", name)\n set_localstorage(\"aside\", name)\n\n def init_menu(self, collapse_menu: bool = True, name: str = None) -> None:\n \"\"\"\n Call this in menu button callback function.\n Args:\n collapse_menu: collapse menu\n name: button name(label) to be highlight\n \"\"\"\n self.visible = True\n self.page = name\n self.scope_clear()\n self.task_handler.remove_pending_task()\n clear(\"content\")\n if collapse_menu:\n self.collapse_menu()\n if name:\n self.active_button(\"menu\", name)\n\n @staticmethod\n @use_scope(\"ROOT\", clear=True)\n def _show() -> None:\n put_scope(\n \"header\",\n [\n put_html(Icon.ALAS).style(\"--header-icon--\"),\n put_text(\"AAS\").style(\"--header-text--\"),\n put_scope(\"header_status\"),\n put_scope(\"header_title\"),\n ],\n )\n put_scope(\n \"contents\",\n [\n put_scope(\"aside\"),\n put_scope(\"menu\"),\n put_scope(\"content\"),\n ],\n )\n\n @staticmethod\n @use_scope(\"header_title\", clear=True)\n def set_title(text=\"\"):\n put_text(text)\n\n @staticmethod\n def collapse_menu() -> None:\n run_js(\n f\"\"\"\n $(\"#pywebio-scope-menu\").addClass(\"container-menu-collapsed\");\n $(\".container-content-collapsed\").removeClass(\"container-content-collapsed\");\n \"\"\"\n )\n\n @staticmethod\n def expand_menu() -> None:\n run_js(\n f\"\"\"\n $(\".container-menu-collapsed\").removeClass(\"container-menu-collapsed\");\n $(\"#pywebio-scope-content\").addClass(\"container-content-collapsed\");\n \"\"\"\n )\n\n @staticmethod\n def active_button(position, value) -> None:\n run_js(\n f\"\"\"\n $(\"button.btn-{position}\").removeClass(\"btn-{position}-active\");\n $(\"div[style*='--{position}-{value}--']>button\").addClass(\"btn-{position}-active\");\n \"\"\"\n )\n\n @staticmethod\n def pin_set_invalid_mark(keys) -> None:\n if isinstance(keys, str):\n keys = [keys]\n keys = [\"_\".join(key.split(\".\")) for key in keys]\n js = \"\".join(\n [\n 
f\"\"\"$(\".form-control[name='{key}']\").addClass('is-invalid');\"\"\"\n for key in keys\n ]\n )\n if js:\n run_js(js)\n # for key in keys:\n # pin_update(key, valid_status=False)\n\n @staticmethod\n def pin_remove_invalid_mark(keys) -> None:\n if isinstance(keys, str):\n keys = [keys]\n keys = [\"_\".join(key.split(\".\")) for key in keys]\n js = \"\".join(\n [\n f\"\"\"$(\".form-control[name='{key}']\").removeClass('is-invalid');\"\"\"\n for key in keys\n ]\n )\n if js:\n run_js(js)\n # for key in keys:\n # pin_update(key, valid_status=0)" }, { "identifier": "get_config_mod", "path": "module/webui/fake.py", "snippet": "def get_config_mod(config_name):\n \"\"\"\n Args:\n config_name (str):\n \"\"\"\n return 'alas'" }, { "identifier": "load_config", "path": "module/webui/fake.py", "snippet": "def load_config(config_name):\n return AzurLaneConfig(config_name, '')" }, { "identifier": "asgi_app", "path": "module/webui/fastapi.py", "snippet": "def asgi_app(\n applications,\n cdn=True,\n static_dir=None,\n debug=False,\n allowed_origins=None,\n check_origin=None,\n **starlette_settings\n):\n debug = Session.debug = os.environ.get(\"PYWEBIO_DEBUG\", debug)\n cdn = cdn_validation(cdn, \"warn\")\n if cdn is False:\n cdn = \"pywebio_static\"\n routes = webio_routes(\n applications,\n cdn=cdn,\n allowed_origins=allowed_origins,\n check_origin=check_origin,\n )\n if static_dir:\n routes.append(\n Mount(\"/static\", app=StaticFiles(directory=static_dir), name=\"static\")\n )\n routes.append(\n Mount(\n \"/pywebio_static\",\n app=StaticFiles(directory=STATIC_PATH),\n name=\"pywebio_static\",\n )\n )\n middleware = [Middleware(HeaderMiddleware)]\n return Starlette(\n routes=routes, middleware=middleware, debug=debug, **starlette_settings\n )" }, { "identifier": "_t", "path": "module/webui/lang.py", "snippet": "def _t(s, lang=None):\n \"\"\"\n Get translation, ignore TRANSLATE_MODE\n \"\"\"\n if not lang:\n lang = LANG\n try:\n return dic_lang[lang][s]\n except KeyError:\n print(f\"Language key ({s}) not found\")\n return s" }, { "identifier": "t", "path": "module/webui/lang.py", "snippet": "def t(s, *args, **kwargs):\n \"\"\"\n Get translation.\n other args, kwargs pass to .format()\n \"\"\"\n if TRANSLATE_MODE:\n return s\n return _t(s, LANG).format(*args, **kwargs)" }, { "identifier": "put_input", "path": "module/webui/pin.py", "snippet": "def put_input(name, type='text', *, label='', value=None, placeholder=None, readonly=None, datalist=None,\n help_text=None, scope=None, position=OutputPosition.BOTTOM, **other_html_attrs) -> Output:\n \"\"\"Output an input widget. Refer to: `pywebio.input.input()`\"\"\"\n from pywebio.input import input\n check_dom_name_value(name, 'pin `name`')\n single_input_return = input(name=name, label=label, value=value, type=type, placeholder=placeholder,\n readonly=readonly, datalist=datalist, help_text=help_text, **other_html_attrs)\n return _pin_output(single_input_return, scope, position)" }, { "identifier": "put_select", "path": "module/webui/pin.py", "snippet": "def put_select(name, options=None, *, label='', multiple=None, value=None, help_text=None,\n scope=None, position=OutputPosition.BOTTOM, **other_html_attrs) -> Output:\n \"\"\"Output a select widget. 
Refer to: `pywebio.input.select()`\"\"\"\n from pywebio.input import select\n check_dom_name_value(name, 'pin `name`')\n single_input_return = select(name=name, options=options, label=label, multiple=multiple,\n value=value, help_text=help_text, **other_html_attrs)\n return _pin_output(single_input_return, scope, position)" }, { "identifier": "ProcessManager", "path": "module/webui/process_manager.py", "snippet": "class ProcessManager:\n _processes: Dict[str, \"ProcessManager\"] = {}\n\n def __init__(self, config_name: str = \"alas\") -> None:\n self.config_name = config_name\n self._renderable_queue: queue.Queue[ConsoleRenderable] = State.manager.Queue()\n self.renderables: List[ConsoleRenderable] = []\n self.renderables_max_length = 400\n self.renderables_reduce_length = 80\n self._process: Process = None\n self.thd_log_queue_handler: threading.Thread = None\n\n def start(self, func, ev: threading.Event = None) -> None:\n if not self.alive:\n if func is None:\n func = get_config_mod(self.config_name)\n self._process = Process(\n target=ProcessManager.run_process,\n args=(\n self.config_name,\n func,\n self._renderable_queue,\n ev,\n ),\n )\n self._process.start()\n self.start_log_queue_handler()\n\n def start_log_queue_handler(self):\n if (\n self.thd_log_queue_handler is not None\n and self.thd_log_queue_handler.is_alive()\n ):\n return\n self.thd_log_queue_handler = threading.Thread(\n target=self._thread_log_queue_handler\n )\n self.thd_log_queue_handler.start()\n\n def stop(self) -> None:\n lock = FileLock(f\"{filepath_config(self.config_name)}.lock\")\n with lock:\n if self.alive:\n self._process.kill()\n self.renderables.append(\n f\"[{self.config_name}] exited. Reason: Manual stop\\n\"\n )\n if self.thd_log_queue_handler is not None:\n self.thd_log_queue_handler.join(timeout=1)\n if self.thd_log_queue_handler.is_alive():\n logger.warning(\n \"Log queue handler thread does not stop within 1 seconds\"\n )\n logger.info(f\"[{self.config_name}] exited\")\n\n def _thread_log_queue_handler(self) -> None:\n while self.alive:\n try:\n log = self._renderable_queue.get(timeout=1)\n except queue.Empty:\n continue\n self.renderables.append(log)\n if len(self.renderables) > self.renderables_max_length:\n self.renderables = self.renderables[self.renderables_reduce_length :]\n logger.info(\"End of log queue handler loop\")\n\n @property\n def alive(self) -> bool:\n if self._process is not None:\n return self._process.is_alive()\n else:\n return False\n\n @property\n def state(self) -> int:\n if self.alive:\n return 1\n elif len(self.renderables) == 0:\n return 2\n else:\n console = Console(no_color=True)\n with console.capture() as capture:\n console.print(self.renderables[-1])\n s = capture.get().strip()\n if s.endswith(\"Reason: Manual stop\"):\n return 2\n elif s.endswith(\"Reason: Finish\"):\n return 2\n elif s.endswith(\"Reason: Update\"):\n return 4\n else:\n return 3\n\n @classmethod\n def get_manager(cls, config_name: str) -> \"ProcessManager\":\n \"\"\"\n Create a new alas if not exists.\n \"\"\"\n if config_name not in cls._processes:\n cls._processes[config_name] = ProcessManager(config_name)\n return cls._processes[config_name]\n\n @staticmethod\n def run_process(\n config_name, func: str, q: queue.Queue, e: threading.Event = None\n ) -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--electron\", action=\"store_true\", help=\"Runs by electron client.\"\n )\n args, _ = parser.parse_known_args()\n State.electron = args.electron\n\n # Setup logger\n 
set_file_logger(name=config_name)\n if State.electron:\n # https://github.com/LmeSzinc/AzurLaneAutoScript/issues/2051\n logger.info(\"Electron detected, remove log output to stdout\")\n from module.logger.logger import console_hdlr\n logger.removeHandler(console_hdlr)\n set_func_logger(func=q.put)\n\n from module.config.config import AzurLaneConfig\n\n AzurLaneConfig.stop_event = e\n try:\n # Run alas\n if func == \"alas\":\n from module.alas import AzurLaneAutoScript\n from aas import ArisuAutoSweeper\n\n if e is not None:\n AzurLaneAutoScript.stop_event = e\n ArisuAutoSweeper(config_name=config_name).loop()\n else:\n logger.critical(f\"No function matched: {func}\")\n logger.info(f\"[{config_name}] exited. Reason: Finish\\n\")\n except Exception as e:\n logger.exception(e)\n\n @classmethod\n def running_instances(cls) -> List[\"ProcessManager\"]:\n l = []\n for process in cls._processes.values():\n if process.alive:\n l.append(process)\n return l\n\n @staticmethod\n def restart_processes(\n instances: List[Union[\"ProcessManager\", str]] = None, ev: threading.Event = None\n ):\n \"\"\"\n After update and reload, or failed to perform an update,\n restart all alas that running before update\n \"\"\"\n logger.hr(\"Restart alas\")\n\n # Load MOD_CONFIG_DICT\n mod_instance()\n\n if instances is None:\n instances = []\n\n _instances = set()\n\n for instance in instances:\n if isinstance(instance, str):\n _instances.add(ProcessManager.get_manager(instance))\n elif isinstance(instance, ProcessManager):\n _instances.add(instance)\n\n try:\n with open(\"./config/reloadalas\", mode=\"r\") as f:\n for line in f.readlines():\n line = line.strip()\n _instances.add(ProcessManager.get_manager(line))\n except FileNotFoundError:\n pass\n\n for process in _instances:\n logger.info(f\"Starting [{process.config_name}]\")\n process.start(func=get_config_mod(process.config_name), ev=ev)\n\n try:\n os.remove(\"./config/reloadalas\")\n except:\n pass\n logger.info(\"Start alas complete\")" }, { "identifier": "RemoteAccess", "path": "module/webui/remote_access.py", "snippet": "class RemoteAccess:\n @staticmethod\n def keep_ssh_alive():\n task_handler: TaskHandler\n task_handler = yield\n while True:\n if _ssh_thread is not None and _ssh_thread.is_alive():\n yield\n continue\n logger.info(\"Remote access service is not running, starting now\")\n try:\n start_remote_access_service()\n except ParseError as e:\n logger.exception(e)\n task_handler.remove_current_task()\n yield\n\n @staticmethod\n def kill_ssh_process():\n if RemoteAccess.is_alive():\n _ssh_process.kill()\n\n @staticmethod\n def is_alive():\n return (\n _ssh_thread is not None\n and _ssh_thread.is_alive()\n and _ssh_process is not None\n and _ssh_process.poll() is None\n )\n\n @staticmethod\n def get_state():\n if RemoteAccess.is_alive():\n if address is not None:\n return 1\n else:\n return 2\n elif _ssh_notfound:\n return 3\n else:\n return 0\n\n @staticmethod\n def get_entry_point():\n return address if RemoteAccess.is_alive() else None" }, { "identifier": "State", "path": "module/webui/setting.py", "snippet": "class State:\n \"\"\"\n Shared settings\n \"\"\"\n\n _init = False\n _clearup = False\n\n restart_event: threading.Event = None\n manager: SyncManager = None\n electron: bool = False\n theme: str = \"default\"\n\n @classmethod\n def init(cls):\n cls.manager = multiprocessing.Manager()\n cls._init = True\n\n @classmethod\n def clearup(cls):\n cls.manager.shutdown()\n cls._clearup = True\n\n @cached_class_property\n def deploy_config(self) -> 
\"DeployConfig\":\n \"\"\"\n Returns:\n DeployConfig:\n \"\"\"\n from module.webui.config import DeployConfig\n\n return DeployConfig()\n\n @cached_class_property\n def config_updater(self) -> \"ConfigUpdater\":\n \"\"\"\n Returns:\n ConfigUpdater:\n \"\"\"\n from module.config.config_updater import ConfigUpdater\n\n return ConfigUpdater()" }, { "identifier": "updater", "path": "module/webui/updater.py", "snippet": "class Updater(DeployConfig, GitManager, PipManager):\n def __init__(self, file=DEPLOY_CONFIG):\n def delay(self):\n def schedule_time(self):\n def execute_output(self, command) -> str:\n def get_commit(self, revision=\"\", n=1, short_sha1=False) -> Tuple:\n def _check_update(self) -> bool:\n def _check_update_(self) -> bool:\n def check_update(self):\n def git_install(self):\n def pip_install(self):\n def update(self):\n def run_update(self):\n def _start_update(self):\n def _wait_update(self, instances: List[ProcessManager], names):\n def _run_update(self, instances, names):\n def _trigger_reload(delay=2):\n def trigger():\n def schedule_update(self) -> Generator:\n def cancel(self):" }, { "identifier": "Icon", "path": "module/webui/utils.py", "snippet": "class Icon:\n \"\"\"\n Storage html of icon.\n \"\"\"\n\n ALAS = _read(filepath_icon(\"alas\"))\n SETTING = _read(filepath_icon(\"setting\"))\n RUN = _read(filepath_icon(\"run\"))\n DEVELOP = _read(filepath_icon(\"develop\"))\n ADD = _read(filepath_icon(\"add\"))" }, { "identifier": "Switch", "path": "module/webui/utils.py", "snippet": "class Switch:\n def __init__(self, status, get_state, name=None):\n \"\"\"\n Args:\n status\n (dict):A dict describes each state.\n {\n 0: {\n 'func': (Callable)\n },\n 1: {\n 'func'\n 'args': (Optional, tuple)\n 'kwargs': (Optional, dict)\n },\n 2: [\n func1,\n {\n 'func': func2\n 'args': args2\n }\n ]\n -1: []\n }\n (Callable):current state will pass into this function\n lambda state: do_update(state=state)\n get_state:\n (Callable):\n return current state\n (Generator):\n yield current state, do nothing when state not in status\n name:\n \"\"\"\n self._lock = threading.Lock()\n self.name = name\n self.status = status\n self.get_state = get_state\n if isinstance(get_state, Generator):\n self._generator = get_state\n elif isinstance(get_state, Callable):\n self._generator = self._get_state()\n\n @staticmethod\n def get_state():\n pass\n\n def _get_state(self):\n \"\"\"\n Predefined generator when `get_state` is an callable\n Customize it if you have multiple criteria on state\n \"\"\"\n _status = self.get_state()\n yield _status\n while True:\n status = self.get_state()\n if _status != status:\n _status = status\n yield _status\n continue\n yield -1\n\n def switch(self):\n with self._lock:\n r = next(self._generator)\n if callable(self.status):\n self.status(r)\n elif r in self.status:\n f = self.status[r]\n if isinstance(f, (dict, Callable)):\n f = [f]\n for d in f:\n if isinstance(d, Callable):\n d = {\"func\": d}\n func = d[\"func\"]\n args = d.get(\"args\", tuple())\n kwargs = d.get(\"kwargs\", dict())\n func(*args, **kwargs)\n\n def g(self) -> Generator:\n g = get_generator(self.switch)\n if self.name:\n name = self.name\n else:\n name = self.get_state.__name__\n g.__name__ = f\"Switch_{name}_refresh\"\n return g" }, { "identifier": "TaskHandler", "path": "module/webui/utils.py", "snippet": "class TaskHandler:\n def __init__(self) -> None:\n # List of background running task\n self.tasks: List[Task] = []\n # List of task name to be removed\n self.pending_remove_tasks: List[Task] = []\n # 
Running task\n self._task = None\n # Task running thread\n self._thread: threading.Thread = None\n self._alive = False\n self._lock = threading.Lock()\n\n def add(self, func, delay: float, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n Another way of `self.add_task()`.\n func: Callable or Generator\n \"\"\"\n if isinstance(func, Callable):\n g = get_generator(func)\n elif isinstance(func, Generator):\n g = func\n self.add_task(Task(g, delay), pending_delete=pending_delete)\n\n def add_task(self, task: Task, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n \"\"\"\n if task in self.tasks:\n logger.warning(f\"Task {task} already in tasks list.\")\n return\n logger.info(f\"Add task {task}\")\n with self._lock:\n self.tasks.append(task)\n if pending_delete:\n self.pending_remove_tasks.append(task)\n\n def _remove_task(self, task: Task) -> None:\n if task in self.tasks:\n self.tasks.remove(task)\n logger.info(f\"Task {task} removed.\")\n else:\n logger.warning(\n f\"Failed to remove task {task}. Current tasks list: {self.tasks}\"\n )\n\n def remove_task(self, task: Task, nowait: bool = False) -> None:\n \"\"\"\n Remove a task in `self.tasks`.\n Args:\n task:\n nowait: if True, remove it right now,\n otherwise remove when call `self.remove_pending_task`\n \"\"\"\n if nowait:\n with self._lock:\n self._remove_task(task)\n else:\n self.pending_remove_tasks.append(task)\n\n def remove_pending_task(self) -> None:\n \"\"\"\n Remove all pending remove tasks.\n \"\"\"\n with self._lock:\n for task in self.pending_remove_tasks:\n self._remove_task(task)\n self.pending_remove_tasks = []\n\n def remove_current_task(self) -> None:\n self.remove_task(self._task, nowait=True)\n\n def get_task(self, name) -> Task:\n with self._lock:\n for task in self.tasks:\n if task.name == name:\n return task\n return None\n\n def loop(self) -> None:\n \"\"\"\n Start task loop.\n You **should** run this function in an individual thread.\n \"\"\"\n self._alive = True\n while self._alive:\n if self.tasks:\n with self._lock:\n self.tasks.sort(key=operator.attrgetter(\"next_run\"))\n task = self.tasks[0]\n if task.next_run < time.time():\n start_time = time.time()\n try:\n self._task = task\n # logger.debug(f'Start task {task.g.__name__}')\n task.send(self)\n # logger.debug(f'End task {task.g.__name__}')\n except Exception as e:\n logger.exception(e)\n self.remove_task(task, nowait=True)\n finally:\n self._task = None\n end_time = time.time()\n task.next_run += task.delay\n with self._lock:\n for task in self.tasks:\n task.next_run += end_time - start_time\n else:\n time.sleep(0.05)\n else:\n time.sleep(0.5)\n logger.info(\"End of task handler loop\")\n\n def _get_thread(self) -> threading.Thread:\n thread = threading.Thread(target=self.loop, daemon=True)\n return thread\n\n def start(self) -> None:\n \"\"\"\n Start task handler.\n \"\"\"\n logger.info(\"Start task handler\")\n if self._thread is not None and self._thread.is_alive():\n logger.warning(\"Task handler already running!\")\n return\n self._thread = self._get_thread()\n self._thread.start()\n\n def stop(self) -> None:\n self.remove_pending_task()\n self._alive = False\n self._thread.join(timeout=2)\n if not self._thread.is_alive():\n logger.info(\"Finish task handler\")\n else:\n logger.warning(\"Task handler does not stop within 2 seconds\")" }, { "identifier": "add_css", "path": "module/webui/utils.py", "snippet": "def add_css(filepath):\n with open(filepath, \"r\") as f:\n css = 
f.read().replace(\"\\n\", \"\")\n run_js(f\"\"\"$('head').append('<style>{css}</style>')\"\"\")" }, { "identifier": "filepath_css", "path": "module/webui/utils.py", "snippet": "def filepath_css(filename):\n return f\"./assets/gui/css/{filename}.css\"" }, { "identifier": "get_alas_config_listen_path", "path": "module/webui/utils.py", "snippet": "def get_alas_config_listen_path(args):\n for path, d in deep_iter(args, depth=3):\n if d.get(\"display\") in [\"readonly\", \"hide\"]:\n continue\n yield path" }, { "identifier": "get_localstorage", "path": "module/webui/utils.py", "snippet": "def get_localstorage(key):\n return eval_js(\"localStorage.getItem(key)\", key=key)" }, { "identifier": "get_window_visibility_state", "path": "module/webui/utils.py", "snippet": "def get_window_visibility_state():\n ret = eval_js(\"document.visibilityState\")\n return False if ret == \"hidden\" else True" }, { "identifier": "login", "path": "module/webui/utils.py", "snippet": "def login(password):\n if get_localstorage(\"password\") == str(password):\n return True\n pwd = input(label=\"Please login below.\", type=PASSWORD, placeholder=\"PASSWORD\")\n if str(pwd) == str(password):\n set_localstorage(\"password\", str(pwd))\n return True\n else:\n toast(\"Wrong password!\", color=\"error\")\n return False" }, { "identifier": "parse_pin_value", "path": "module/webui/utils.py", "snippet": "def parse_pin_value(val, valuetype: str = None):\n \"\"\"\n input, textarea return str\n select return its option (str or int)\n checkbox return [] or [True] (define in put_checkbox_)\n \"\"\"\n if isinstance(val, list):\n if len(val) == 0:\n return False\n else:\n return True\n elif valuetype:\n return str2type[valuetype](val)\n elif isinstance(val, (int, float)):\n return val\n else:\n try:\n v = float(val)\n except ValueError:\n return val\n if v.is_integer():\n return int(v)\n else:\n return v" }, { "identifier": "raise_exception", "path": "module/webui/utils.py", "snippet": "def raise_exception(x=3):\n \"\"\"\n For testing purpose\n \"\"\"\n if x > 0:\n raise_exception(x - 1)\n else:\n raise Exception(\"quq\")" }, { "identifier": "re_fullmatch", "path": "module/webui/utils.py", "snippet": "def re_fullmatch(pattern, string):\n if pattern == \"datetime\":\n try:\n datetime.datetime.fromisoformat(string)\n return True\n except ValueError:\n return False\n # elif:\n return re.fullmatch(pattern=pattern, string=string)" }, { "identifier": "BinarySwitchButton", "path": "module/webui/widgets.py", "snippet": "class ScrollableCode:\nclass RichLog:\nclass BinarySwitchButton(Switch):\n def __init__(self, keep_bottom: bool = True) -> None:\n def output(self):\n def append(self, text: str) -> None:\n def scroll(self) -> None:\n def reset(self) -> None:\n def set_scroll(self, b: bool) -> None:\n def __init__(self, scope, font_width=\"0.559\") -> None:\n def render(self, renderable: ConsoleRenderable) -> str:\n def extend(self, text):\n def reset(self):\n def scroll(self) -> None:\n def set_scroll(self, b: bool) -> None:\n def get_width(self):\n def put_log(self, pm: ProcessManager) -> Generator:\n def __init__(\n self,\n get_state,\n label_on,\n label_off,\n onclick_on,\n onclick_off,\n scope,\n color_on=\"success\",\n color_off=\"secondary\",\n ):\n def update_button(self, label, onclick, color):\ndef put_icon_buttons(\n icon_html: str,\n buttons: List[Dict[str, str]],\n onclick: Union[List[Callable[[], None]], Callable[[], None]],\n) -> Output:\ndef put_none() -> Output:\ndef get_title_help(kwargs: T_Output_Kwargs) -> Output:\ndef 
put_arg_input(kwargs: T_Output_Kwargs) -> Output:\ndef product_stored_row(kwargs: T_Output_Kwargs, key, value):\ndef put_arg_stored(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_select(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_state(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_textarea(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_checkbox(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_datetime(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_storage(kwargs: T_Output_Kwargs) -> Optional[Output]:\n def clear_callback():\ndef put_output(output_kwargs: T_Output_Kwargs) -> Optional[Output]:\ndef get_loading_style(shape: str, fill: bool) -> str:\ndef put_loading_text(\n text: str,\n shape: str = \"border\",\n color: str = \"dark\",\n fill: bool = False,\n size: str = \"auto 2px 1fr\",\n):" } ]
import argparse import queue import threading import time import module.webui.lang as lang from datetime import datetime from functools import partial from typing import Dict, List, Optional from pywebio import config as webconfig from pywebio.output import ( Output, clear, close_popup, popup, put_button, put_buttons, put_collapse, put_column, put_error, put_html, put_link, put_loading, put_markdown, put_row, put_scope, put_table, put_text, put_warning, toast, use_scope, ) from pywebio.pin import pin, pin_on_change from pywebio.session import go_app, info, local, register_thread, run_js, set_env from module.config.config import AzurLaneConfig, Function from module.config.utils import ( alas_instance, alas_template, deep_get, deep_iter, deep_set, dict_to_kv, filepath_args, filepath_config, read_file, ) from module.logger import logger from module.webui.base import Frame from module.webui.fake import ( get_config_mod, load_config, ) from module.webui.fastapi import asgi_app from module.webui.lang import _t, t from module.webui.pin import put_input, put_select from module.webui.process_manager import ProcessManager from module.webui.remote_access import RemoteAccess from module.webui.setting import State from module.webui.updater import updater from module.webui.utils import ( Icon, Switch, TaskHandler, add_css, filepath_css, get_alas_config_listen_path, get_localstorage, get_window_visibility_state, login, parse_pin_value, raise_exception, re_fullmatch, ) from module.webui.widgets import ( BinarySwitchButton, RichLog, T_Output_Kwargs, put_icon_buttons, put_loading_text, put_none, put_output, )
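The import block above is what ties the preceding context list to the target file: each "identifier" recorded in the context entries (AzurLaneConfig, Function, deep_get, ProcessManager, and so on) reappears here as an import in module/webui/app.py. A minimal cross-check sketch, assuming a row has already been parsed into a dict with the context and import_statement fields as rendered above; the helper name is hypothetical, not part of the dataset:

def check_context_matches_imports(record: dict) -> list:
    """Return context identifiers that never occur in the record's import block."""
    imports = record["import_statement"]
    return [entry["identifier"] for entry in record["context"]
            if entry["identifier"] not in imports]

An empty return list means the retrieved snippets line up with what the target file actually imports, as they do for this record.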
15,264
log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value}) for path in get_alas_config_listen_path(self.ALAS_ARGS): pin_on_change( name="_".join(path), onchange=partial(put_queue, ".".join(path)) ) logger.info("Init config watcher done.") def _alas_thread_update_config(self) -> None: modified = {} while self.alive: try: d = self.modified_config_queue.get(timeout=10) config_name = self.alas_name read = self.alas_config.read_file write = self.alas_config.write_file except queue.Empty: continue modified[d["name"]] = d["value"] while True: try: d = self.modified_config_queue.get(timeout=1) modified[d["name"]] = d["value"] except queue.Empty: self._save_config(modified, config_name, read, write) modified.clear() break def _save_config( self, modified: Dict[str, str], config_name: str, read=State.config_updater.read_file, write=State.config_updater.write_file, ) -> None: try: valid = [] invalid = [] config = read(config_name) for k, v in modified.copy().items(): valuetype = deep_get(self.ALAS_ARGS, k + ".valuetype") v = parse_pin_value(v, valuetype) validate = deep_get(self.ALAS_ARGS, k + ".validate") if not len(str(v)): default = deep_get(self.ALAS_ARGS, k + ".value") modified[k] = default deep_set(config, k, default) valid.append(k) pin["_".join(k.split("."))] = default elif not validate or re_fullmatch(validate, v): deep_set(config, k, v) modified[k] = v valid.append(k) # update Emotion Record if Emotion Value is changed if "Emotion" in k and "Value" in k: k = k.split(".") k[-1] = k[-1].replace("Value", "Record") k = ".".join(k) v = datetime.now().strftime("%Y-%m-%d %H:%M:%S") modified[k] = v deep_set(config, k, v) valid.append(k) pin["_".join(k.split("."))] = v else: modified.pop(k) invalid.append(k) logger.warning(f"Invalid value {v} for key {k}, skip saving.") self.pin_remove_invalid_mark(valid) self.pin_set_invalid_mark(invalid) if modified: toast( t("Gui.Toast.ConfigSaved"), duration=1, position="right", color="success", ) logger.info(
task_handler = TaskHandler() class AlasGUI(Frame): ALAS_MENU: Dict[str, Dict[str, List[str]]] ALAS_ARGS: Dict[str, Dict[str, Dict[str, Dict[str, str]]]] ALAS_STORED: Dict[str, Dict[str, Dict[str, str]]] theme = "default" def initial(self) -> None: self.ALAS_MENU = read_file(filepath_args("menu", self.alas_mod)) self.ALAS_ARGS = read_file(filepath_args("args", self.alas_mod)) self.ALAS_STORED = read_file(filepath_args("stored", self.alas_mod)) self._init_alas_config_watcher() def __init__(self) -> None: super().__init__() # modified keys, return values of pin_wait_change() self.modified_config_queue = queue.Queue() # alas config name self.alas_name = "" self.alas_mod = "alas" self.alas_config = AzurLaneConfig("template") self.initial() @use_scope("aside", clear=True) def set_aside(self) -> None: # TODO: update put_icon_buttons() put_icon_buttons( Icon.DEVELOP, buttons=[ {"label": t("Gui.Aside.Home"), "value": "Home", "color": "aside"} ], onclick=[self.ui_develop], ), for name in alas_instance(): put_icon_buttons( Icon.RUN, buttons=[{"label": name, "value": name, "color": "aside"}], onclick=self.ui_alas, ) put_icon_buttons( Icon.ADD, buttons=[ {"label": t("Gui.Aside.AddAlas"), "value": "AddAlas", "color": "aside"} ], onclick=[self.ui_add_alas], ), @use_scope("header_status") def set_status(self, state: int) -> None: """ Args: state (int): 1 (running) 2 (not running) 3 (warning, stop unexpectedly) 4 (stop for update) 0 (hide) -1 (*state not changed) """ if state == -1: return clear() if state == 1: put_loading_text(t("Gui.Status.Running"), color="success") elif state == 2: put_loading_text(t("Gui.Status.Inactive"), color="secondary", fill=True) elif state == 3: put_loading_text(t("Gui.Status.Warning"), shape="grow", color="warning") elif state == 4: put_loading_text(t("Gui.Status.Updating"), shape="grow", color="success") @classmethod def set_theme(cls, theme="default") -> None: cls.theme = theme State.deploy_config.Theme = theme State.theme = theme webconfig(theme=theme) @use_scope("menu", clear=True) def alas_set_menu(self) -> None: """ Set menu """ put_buttons( [{ "label": t("Gui.MenuAlas.Overview"), "value": "Overview", "color": "menu", }], onclick=[self.alas_overview], ).style(f"--menu-Overview--") for menu, task_data in self.ALAS_MENU.items(): if task_data.get("page") == "tool": _onclick = self.alas_daemon_overview else: _onclick = self.alas_set_group if task_data.get("menu") == "collapse": task_btn_list = [ put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--") for task in task_data.get("tasks", []) ] put_collapse(title=t(f"Menu.{menu}.name"), content=task_btn_list) else: title = t(f"Menu.{menu}.name") put_html('<div class="hr-task-group-box">' '<span class="hr-task-group-line"></span>' f'<span class="hr-task-group-text">{title}</span>' '<span class="hr-task-group-line"></span>' '</div>' ) for task in task_data.get("tasks", []): put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--").style(f"padding-left: 0.75rem") self.alas_overview() @use_scope("content", clear=True) def alas_set_group(self, task: str) -> None: """ Set arg groups from dict """ self.init_menu(name=task) self.set_title(t(f"Task.{task}.name")) put_scope("_groups", [put_none(), put_scope("groups"), put_scope("navigator")]) task_help: str = t(f"Task.{task}.help") if task_help: put_scope( "group__info", scope="groups", content=[put_text(task_help).style("font-size: 1rem")], 
) config = self.alas_config.read_file(self.alas_name) for group, arg_dict in deep_iter(self.ALAS_ARGS[task], depth=1): if self.set_group(group, arg_dict, config, task): self.set_navigator(group) @use_scope("groups") def set_group(self, group, arg_dict, config, task): group_name = group[0] output_list: List[Output] = [] for arg, arg_dict in deep_iter(arg_dict, depth=1): output_kwargs: T_Output_Kwargs = arg_dict.copy() # Skip hide display: Optional[str] = output_kwargs.pop("display", None) if display == "hide": continue # Disable elif display == "disabled": output_kwargs["disabled"] = True # Output type output_kwargs["widget_type"] = output_kwargs.pop("type") arg_name = arg[0] # [arg_name,] # Internal pin widget name output_kwargs["name"] = f"{task}_{group_name}_{arg_name}" # Display title output_kwargs["title"] = t(f"{group_name}.{arg_name}.name") # Get value from config value = deep_get( config, [task, group_name, arg_name], output_kwargs["value"] ) # idk value = str(value) if isinstance(value, datetime) else value # Default value output_kwargs["value"] = value # Options output_kwargs["options"] = options = output_kwargs.pop("option", []) # Options label options_label = [] for opt in options: options_label.append(t(f"{group_name}.{arg_name}.{opt}")) output_kwargs["options_label"] = options_label # Help arg_help = t(f"{group_name}.{arg_name}.help") if arg_help == "" or not arg_help: arg_help = None output_kwargs["help"] = arg_help # Invalid feedback output_kwargs["invalid_feedback"] = t("Gui.Text.InvalidFeedBack", value) o = put_output(output_kwargs) if o is not None: # output will inherit current scope when created, override here o.spec["scope"] = f"#pywebio-scope-group_{group_name}" output_list.append(o) if not output_list: return 0 with use_scope(f"group_{group_name}"): put_text(t(f"{group_name}._info.name")) group_help = t(f"{group_name}._info.help") if group_help != "": put_text(group_help) put_html('<hr class="hr-group">') for output in output_list: output.show() return len(output_list) @use_scope("navigator") def set_navigator(self, group): js = f""" $("#pywebio-scope-groups").scrollTop( $("#pywebio-scope-group_{group[0]}").position().top + $("#pywebio-scope-groups").scrollTop() - 59 ) """ put_button( label=t(f"{group[0]}._info.name"), onclick=lambda: run_js(js), color="navigator", ) def set_dashboard(self, arg, arg_dict, config): i18n = arg_dict.get('i18n') if i18n: name = t(i18n) else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 
1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value}) for path in get_alas_config_listen_path(self.ALAS_ARGS): pin_on_change( name="_".join(path), onchange=partial(put_queue, ".".join(path)) ) logger.info("Init config watcher done.") def _alas_thread_update_config(self) -> None: modified = {} while self.alive: try: d = self.modified_config_queue.get(timeout=10) config_name = self.alas_name read = self.alas_config.read_file write = self.alas_config.write_file except queue.Empty: continue modified[d["name"]] = d["value"] while True: try: d = self.modified_config_queue.get(timeout=1) modified[d["name"]] = d["value"] except queue.Empty: self._save_config(modified, config_name, read, write) modified.clear() break def _save_config( self, modified: Dict[str, str], config_name: str, read=State.config_updater.read_file, write=State.config_updater.write_file, ) -> None: try: valid = [] invalid = [] config = read(config_name) for k, v in modified.copy().items(): valuetype = deep_get(self.ALAS_ARGS, k + ".valuetype") v = parse_pin_value(v, valuetype) validate = deep_get(self.ALAS_ARGS, k + ".validate") if not len(str(v)): default = deep_get(self.ALAS_ARGS, k + ".value") modified[k] = default deep_set(config, k, default) valid.append(k) pin["_".join(k.split("."))] = default elif not validate or re_fullmatch(validate, v): deep_set(config, k, v) modified[k] = v valid.append(k) # update Emotion Record if Emotion Value is changed if "Emotion" in k and "Value" in k: k = k.split(".") k[-1] = k[-1].replace("Value", 
"Record") k = ".".join(k) v = datetime.now().strftime("%Y-%m-%d %H:%M:%S") modified[k] = v deep_set(config, k, v) valid.append(k) pin["_".join(k.split("."))] = v else: modified.pop(k) invalid.append(k) logger.warning(f"Invalid value {v} for key {k}, skip saving.") self.pin_remove_invalid_mark(valid) self.pin_set_invalid_mark(invalid) if modified: toast( t("Gui.Toast.ConfigSaved"), duration=1, position="right", color="success", ) logger.info(
f"Save config {filepath_config(config_name)}, {dict_to_kv(modified)}"
9
2023-11-01 07:09:45+00:00
24k
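This closes the record for module/webui/app.py. Reading the trailing fields together: next_line is the gold continuation of the logger.info( call that both cropped_code and all_code break off at; gold_snippet_index 9, counting context entries from zero, lands on the filepath_config snippet, which is exactly the symbol the gold line uses; and the 24k level presumably buckets the example by prompt length, consistent with token_num 15,264. A minimal consumption sketch, assuming the rows are serialized one per line as JSON with these field names, and with predict as a hypothetical user-supplied completion function; nothing below is a fixed API of the dataset:

import json
from typing import Callable

def build_prompt(record: dict) -> str:
    # Concatenate retrieved cross-file snippets, the import block, and the
    # in-file prefix into a single next-line completion prompt.
    parts = [entry["snippet"] for entry in record["context"]]
    parts.append(record["import_statement"])
    parts.append(record["cropped_code"])
    return "\n".join(parts)

def evaluate(path: str, predict: Callable[[str], str]) -> float:
    hits = total = 0
    with open(path, encoding="utf-8") as f:
        for line in f:
            record = json.loads(line)
            # gold_snippet_index points into the context list; for the record
            # above, index 9 is the filepath_config snippet.
            assert 0 <= record["gold_snippet_index"] < len(record["context"])
            prediction = predict(build_prompt(record))
            # Next-line prediction is typically scored as whitespace-stripped
            # exact match against next_line.
            hits += prediction.strip() == record["next_line"].strip()
            total += 1
    return hits / max(total, 1)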
radekd91/inferno
inferno/models/DECA.py
[ { "identifier": "EmoNetLoss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "class EmoNetLoss(EmoLossBase):\n# class EmoNetLoss(object):\n\n def __init__(self, device, emonet=None, trainable=False, normalize_features=False, emo_feat_loss=None, au_loss=None):\n if emonet is None:\n emonet = get_emonet(device).eval()\n\n last_feature_size = 256 # TODO: fix this hardcoded number, get it from EmoNet class instead\n if isinstance(emo_feat_loss, dict ) and \"barlow_twins\" in emo_feat_loss[\"type\"]:\n # if barlow twins, we need to know the feature size\n emo_feat_loss[\"feature_size\"] = last_feature_size\n\n super().__init__(trainable, normalize_features=normalize_features, emo_feat_loss=emo_feat_loss, au_loss=au_loss,\n last_feature_size=last_feature_size)\n self.emonet = emonet\n\n # elif isinstance(emonet, str):\n # path = Path(emonet)\n # if path.is_dir():\n # print(f\"Loading trained EmoNet from: '{path}'\")\n # def load_configs(run_path):\n # from omegaconf import OmegaConf\n # with open(Path(run_path) / \"cfg.yaml\", \"r\") as f:\n # conf = OmegaConf.load(f)\n # return conf\n #\n # cfg = load_configs(path)\n # checkpoint_mode = 'best'\n # stages_prefixes = \"\"\n #\n # checkpoint, checkpoint_kwargs = get_checkpoint_with_kwargs(cfg, stages_prefixes,\n # checkpoint_mode=checkpoint_mode,\n # # relative_to=relative_to_path,\n # # replace_root=replace_root_path\n # )\n # checkpoint_kwargs = checkpoint_kwargs or {}\n # emonet_module = EmoNetModule.load_from_checkpoint(checkpoint_path=checkpoint, strict=False, **checkpoint_kwargs)\n # self.emonet = emonet_module.backbone\n # else:\n # raise ValueError(\"Please specify the directory which contains the config of the trained Emonet.\")\n\n # else:\n # self.emonet = emonet\n\n if not trainable:\n self.emonet.eval()\n self.emonet.requires_grad_(False)\n else:\n self.emonet.train()\n self.emonet.emo_parameters_requires_grad(True)\n\n # self.emonet.eval()\n # self.emonet = self.emonet.requires_grad_(False)\n # self.transforms = Resize((256, 256))\n self.size = (256, 256)\n # self.emo_feat_loss = F.l1_loss\n # self.valence_loss = F.l1_loss\n # self.arousal_loss = F.l1_loss\n # # self.expression_loss = F.kl_div\n # self.expression_loss = F.l1_loss\n # self.input_emotion = None\n # self.output_emotion = None\n\n @property\n def network(self):\n return self.emonet\n\n def to(self, *args, **kwargs):\n self.emonet = self.emonet.to(*args, **kwargs)\n # self.emonet = self.emonet.requires_grad_(False)\n # for p in self.emonet.parameters():\n # p.requires_grad = False\n\n def eval(self):\n self.emonet = self.emonet.eval()\n # self.emonet = self.emonet.requires_grad_(False)\n # for p in self.emonet.parameters():\n # p.requires_grad = False\n\n def train(self, mode: bool = True):\n super().train(mode)\n if hasattr(self, 'emonet'):\n self.emonet = self.emonet.eval() # evaluation mode no matter what, it's just a loss function\n # self.emonet = self.emonet.requires_grad_(False)\n # for p in self.emonet.parameters():\n # p.requires_grad = False\n\n def forward(self, predicted, target, *args, **kwargs):\n res = self.compute_loss(target, predicted, *args, **kwargs)\n feat_2_loss = res[1]\n return feat_2_loss\n\n def emonet_out(self, images):\n images = F.interpolate(images, self.size, mode='bilinear')\n # images = self.transform(images)\n return self.emonet(images, intermediate_features=True)\n\n\n def _get_trainable_params(self):\n if self.trainable:\n return self.emonet.emo_parameters\n return []" }, { "identifier": "create_emo_loss", "path": 
"inferno/layers/losses/EmoNetLoss.py", "snippet": "def create_emo_loss(device, emoloss = None, trainable=False, dual=False, normalize_features=False, emo_feat_loss=None):\n if emoloss is None:\n return EmoNetLoss(device, emonet=emoloss)\n if isinstance(emoloss, str):\n path = Path(emoloss)\n if not path.is_absolute():\n path = Path(get_path_to_assets()) / path\n if path.is_dir():\n from inferno.layers.losses.emotion_loss_loader import emo_network_from_path\n emo_loss = emo_network_from_path(path)\n\n if isinstance(emo_loss, EmoNetModule):\n emonet = emo_loss.emonet\n print(\"Creating EmoNetLoss\")\n return EmoNetLoss(device, emonet=emonet, trainable=trainable,\n normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)\n else:\n if not dual:\n print(f\"Creating EmoBackboneLoss, trainable={trainable}\")\n return EmoBackboneLoss(device, emo_loss, trainable=trainable,\n normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)\n else:\n print(f\"Creating EmoBackboneDualLoss\")\n return EmoBackboneDualLoss(device, emo_loss, trainable=trainable, clone_is_trainable=True,\n normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)\n else:\n raise ValueError(\"Please specify the directory which contains the config of the trained Emonet.\")\n else: \n raise TypeError(f\"Wrong type of emoloss: {type(emoloss)}\")" }, { "identifier": "create_au_loss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "def create_au_loss(device, au_loss):\n if au_loss is None:\n raise NotImplementedError(\"Pass an au_loss config.\")\n # return EmoNetLoss(device, emonet=au_loss)\n if isinstance(au_loss, (dict, omegaconf.DictConfig)):\n path = Path(au_loss.path)\n if path.is_dir():\n au_loss_net = emo_network_from_path(path)\n\n if isinstance(au_loss_net, EmoNetModule):\n emonet = au_loss_net.emonet\n print(\"Creating EmoNetLoss\")\n return EmoNetLoss(device,\n emonet=emonet,\n trainable=au_loss.trainable,\n normalize_features=au_loss.normalize_features,\n emo_feat_loss=au_loss.feat_loss,\n au_loss=au_loss.au_loss)\n else:\n if not au_loss.dual:\n print(f\"Creating EmoBackboneLoss, trainable={au_loss.trainable}\")\n return EmoBackboneLoss(device, au_loss_net,\n trainable=au_loss.trainable,\n normalize_features=au_loss.normalize_features,\n emo_feat_loss=au_loss.feat_loss, \n au_loss=au_loss.au_loss\n )\n else:\n print(f\"Creating EmoBackboneDualLoss\")\n return EmoBackboneDualLoss(device, au_loss_net,\n trainable=au_loss.trainable,\n clone_is_trainable=True,\n normalize_features=au_loss.normalize_features,\n emo_feat_loss=au_loss.feat_loss,\n au_loss=au_loss.au_loss)\n else:\n raise ValueError(\"Please specify the config to instantiate AU loss\")" }, { "identifier": "SRenderY", "path": "inferno/models/Renderer.py", "snippet": "class SRenderY(nn.Module):\n def __init__(self, image_size, obj_filename, uv_size=256):\n super(SRenderY, self).__init__()\n self.image_size = image_size\n self.uv_size = uv_size\n\n verts, faces, aux = load_obj(obj_filename)\n uvcoords = aux.verts_uvs[None, ...] # (N, V, 2)\n uvfaces = faces.textures_idx[None, ...] 
# (N, F, 3)\n        faces = faces.verts_idx[None, ...]\n        self.rasterizer = Pytorch3dRasterizer(image_size)\n        self.uv_rasterizer = Pytorch3dRasterizer(uv_size)\n\n        # faces\n        dense_triangles = util.generate_triangles(uv_size, uv_size)\n        self.register_buffer('dense_faces', torch.from_numpy(dense_triangles).long()[None, :, :])\n        self.register_buffer('faces', faces)\n        self.register_buffer('raw_uvcoords', uvcoords)\n\n        # uv coords\n        uvcoords = torch.cat([uvcoords, uvcoords[:, :, 0:1] * 0. + 1.], -1)  # [bz, ntv, 3]\n        uvcoords = uvcoords * 2 - 1\n        uvcoords[..., 1] = -uvcoords[..., 1]\n        face_uvcoords = util.face_vertices(uvcoords, uvfaces)\n        self.register_buffer('uvcoords', uvcoords)\n        self.register_buffer('uvfaces', uvfaces)\n        self.register_buffer('face_uvcoords', face_uvcoords)\n\n        # shape colors, for rendering shape overlay\n        colors = torch.tensor([180, 180, 180])[None, None, :].repeat(1, faces.max() + 1, 1).float() / 255.\n        face_colors = util.face_vertices(colors, faces)\n        self.register_buffer('face_colors', face_colors)\n\n        ## SH factors for lighting\n        pi = np.pi\n        constant_factor = torch.tensor(\n            [1 / np.sqrt(4 * pi), ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), \\\n             ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))),\n             (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))), \\\n             (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))), (pi / 4) * (3 / 2) * (np.sqrt(5 / (12 * pi))),\n             (pi / 4) * (1 / 2) * (np.sqrt(5 / (4 * pi)))]).float()\n        self.register_buffer('constant_factor', constant_factor)\n\n    def forward(self, vertices, transformed_vertices, albedos, lights=None, light_type='point'):\n        '''\n        -- Texture Rendering\n        vertices: [batch_size, V, 3], vertices in world space, for calculating normals, then shading\n        transformed_vertices: [batch_size, V, 3], range: [-1,1], projected vertices, in image space, for rasterization\n        albedos: [batch_size, 3, h, w], uv map\n        lights:\n            spherical harmonics: [N, 9(shcoeff), 3(rgb)]\n            points/directional lighting: [N, n_lights, 6(xyzrgb)]\n        light_type:\n            point or directional\n        '''\n        batch_size = vertices.shape[0]\n        ## rasterizer near 0 far 100. 
move mesh so minz larger than 0\n transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10\n\n # attributes\n face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1))\n face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))\n transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1))\n transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))\n\n attributes = torch.cat([self.face_uvcoords.expand(batch_size, -1, -1, -1),\n transformed_face_normals.detach(),\n face_vertices.detach(),\n face_normals],\n -1)\n\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n # vis mask\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n\n # albedo\n uvcoords_images = rendering[:, :3, :, :]\n grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]\n albedo_images = F.grid_sample(albedos, grid, align_corners=False)\n\n # visible mask for pixels with positive normal direction\n transformed_normal_map = rendering[:, 3:6, :, :].detach()\n pos_mask = (transformed_normal_map[:, 2:, :, :] < -0.05).float()\n\n # shading\n normal_images = rendering[:, 9:12, :, :]\n if lights is not None:\n if lights.shape[1] == 9:\n shading_images = self.add_SHlight(normal_images, lights)\n else:\n if light_type == 'point':\n vertice_images = rendering[:, 6:9, :, :].detach()\n shading = self.add_pointlight(vertice_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),\n normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),\n lights)\n shading_images = shading.reshape(\n [batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 3, 1, 2)\n else:\n shading = self.add_directionlight(normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),\n lights)\n shading_images = shading.reshape(\n [batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 3, 1, 2)\n images = albedo_images * shading_images\n else:\n images = albedo_images\n shading_images = images.detach() * 0.\n # import ipdb; ipdb.set_trace()\n # print('albedo: ', albedo_images.min(), albedo_images.max())\n # print('normal: ', normal_images.min(), normal_images.max())\n # print('lights: ', lights.min(), lights.max())\n # print('shading: ', shading_images.min(), shading_images.max())\n # print('images: ', images.min(), images.max())\n # exit()\n outputs = {\n 'images': images * alpha_images,\n 'albedo_images': albedo_images,\n 'alpha_images': alpha_images,\n 'pos_mask': pos_mask,\n 'shading_images': shading_images,\n 'grid': grid,\n 'normals': normals,\n 'normal_images': normal_images,\n 'transformed_normals': transformed_normals,\n }\n\n return outputs\n\n def add_SHlight(self, normal_images, sh_coeff):\n '''\n sh_coeff: [bz, 9, 3]\n '''\n N = normal_images\n sh = torch.stack([\n N[:, 0] * 0. 
+ 1., N[:, 0], N[:, 1], \\\n N[:, 2], N[:, 0] * N[:, 1], N[:, 0] * N[:, 2],\n N[:, 1] * N[:, 2], N[:, 0] ** 2 - N[:, 1] ** 2, 3 * (N[:, 2] ** 2) - 1\n ],\n 1) # [bz, 9, h, w]\n sh = sh * self.constant_factor[None, :, None, None]\n shading = torch.sum(sh_coeff[:, :, :, None, None] * sh[:, :, None, :, :], 1) # [bz, 9, 3, h, w]\n return shading\n\n def add_pointlight(self, vertices, normals, lights):\n '''\n vertices: [bz, nv, 3]\n lights: [bz, nlight, 6]\n returns:\n shading: [bz, nv, 3]\n '''\n light_positions = lights[:, :, :3];\n light_intensities = lights[:, :, 3:]\n directions_to_lights = F.normalize(light_positions[:, :, None, :] - vertices[:, None, :, :], dim=3)\n # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)\n normals_dot_lights = (normals[:, None, :, :] * directions_to_lights).sum(dim=3)\n shading = normals_dot_lights[:, :, :, None] * light_intensities[:, :, None, :]\n return shading.mean(1)\n\n def add_directionlight(self, normals, lights):\n '''\n normals: [bz, nv, 3]\n lights: [bz, nlight, 6]\n returns:\n shading: [bz, nv, 3]\n '''\n light_direction = lights[:, :, :3];\n light_intensities = lights[:, :, 3:]\n directions_to_lights = F.normalize(light_direction[:, :, None, :].expand(-1, -1, normals.shape[1], -1), dim=3)\n # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)\n # normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)\n normals_dot_lights = torch.clamp((normals[:, None, :, :] * directions_to_lights).sum(dim=3), 0., 1.)\n shading = normals_dot_lights[:, :, :, None] * light_intensities[:, :, None, :]\n return shading.mean(1)\n\n def render_shape(self, vertices, transformed_vertices, images=None, detail_normal_images=None, lights=None):\n '''\n -- rendering shape with detail normal map\n '''\n batch_size = vertices.shape[0]\n if lights is None:\n light_positions = torch.tensor(\n [\n [-1, 1, 1],\n [1, 1, 1],\n [-1, -1, 1],\n [1, -1, 1],\n [0, 0, 1]\n ]\n )[None, :, :].expand(batch_size, -1, -1).float()\n light_intensities = torch.ones_like(light_positions).float() * 1.7\n lights = torch.cat((light_positions, light_intensities), 2).to(vertices.device)\n transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10\n\n # Attributes\n face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1));\n face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))\n transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1));\n transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))\n attributes = torch.cat([self.face_colors.expand(batch_size, -1, -1, -1),\n transformed_face_normals.detach(),\n face_vertices.detach(),\n face_normals],\n -1)\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n\n # albedo\n albedo_images = rendering[:, :3, :, :]\n # mask\n transformed_normal_map = rendering[:, 3:6, :, :].detach()\n pos_mask = (transformed_normal_map[:, 2:, :, :] < 0).float()\n\n # shading\n normal_images = rendering[:, 9:12, :, :].detach()\n vertice_images = rendering[:, 6:9, :, :].detach()\n if detail_normal_images is not None:\n normal_images = detail_normal_images\n\n shading = 
self.add_directionlight(normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]), lights)\n shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 3,\n 1,\n 2).contiguous()\n shaded_images = albedo_images * shading_images\n\n if images is None:\n shape_images = shaded_images * alpha_images + torch.zeros_like(shaded_images).to(vertices.device) * (\n 1 - alpha_images)\n else:\n shape_images = shaded_images * alpha_images + images * (1 - alpha_images)\n return shape_images\n\n def render_depth(self, transformed_vertices):\n '''\n -- rendering depth\n '''\n batch_size = transformed_vertices.shape[0]\n\n transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] - transformed_vertices[:, :, 2].min()\n z = -transformed_vertices[:, :, 2:].repeat(1, 1, 3)\n z = z - z.min()\n z = z / z.max()\n # Attributes\n attributes = util.face_vertices(z, self.faces.expand(batch_size, -1, -1))\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n depth_images = rendering[:, :1, :, :]\n return depth_images\n\n def render_normal(self, transformed_vertices, normals):\n '''\n -- rendering normal\n '''\n batch_size = normals.shape[0]\n\n # Attributes\n attributes = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n normal_images = rendering[:, :3, :, :]\n return normal_images\n\n def world2uv(self, vertices):\n '''\n project vertices from world space to uv space\n vertices: [bz, V, 3]\n uv_vertices: [bz, 3, h, w]\n '''\n batch_size = vertices.shape[0]\n face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1),\n self.uvfaces.expand(batch_size, -1, -1), face_vertices)[:, :3]\n return uv_vertices" }, { "identifier": "ResnetEncoder", "path": "inferno/models/DecaEncoder.py", "snippet": "class ResnetEncoder(BaseEncoder):\n def __init__(self, outsize, last_op=None):\n super(ResnetEncoder, self).__init__(outsize, last_op)\n # feature_size = 2048\n # self.encoder = resnet.load_ResNet50Model() # out: 2048\n # ### regressor\n # self.layers = nn.Sequential(\n # nn.Linear(feature_size, 1024),\n # nn.ReLU(),\n # nn.Linear(1024, outsize)\n # )\n # self.last_op = last_op\n\n def _create_encoder(self):\n self.encoder = resnet.load_ResNet50Model() # out: 2048" }, { "identifier": "SecondHeadResnet", "path": "inferno/models/DecaEncoder.py", "snippet": "class SecondHeadResnet(nn.Module):\n\n def __init__(self, enc : BaseEncoder, outsize, last_op=None):\n super().__init__()\n self.resnet = enc # yes, self.resnet is no longer accurate but the name is kept for legacy reasons (to be able to load old models)\n self.layers = nn.Sequential(\n nn.Linear(self.resnet.feature_size, 1024),\n nn.ReLU(),\n nn.Linear(1024, outsize)\n )\n if last_op == 'same':\n self.last_op = self.resnet.last_op\n else:\n self.last_op = last_op\n\n def forward_features(self, inputs):\n out1, features = self.resnet(inputs, output_features=True)\n return out1, features\n\n def forward_features_to_output(self, features):\n parameters = self.layers(features)\n if self.last_op:\n parameters = self.last_op(parameters)\n return parameters\n\n\n def forward(self, 
inputs):\n        out1, features = self.forward_features(inputs)\n        out2 = self.forward_features_to_output(features)\n        return out1, out2\n\n\n    def train(self, mode: bool = True):\n        # here we NEVER modify the eval/train status of the resnet backbone, only the FC layers of the second head\n        self.layers.train(mode)\n        return self\n\n    def reset_last_layer(self):\n        # initialize the last layer to zero to help the network \n        # predict the initial pose a bit more stable\n        torch.nn.init.constant_(self.layers[-1].weight, 0)\n        torch.nn.init.constant_(self.layers[-1].bias, 0)\n\n    def get_feature_size(self): \n        return self.resnet.feature_size" }, { "identifier": "SwinEncoder", "path": "inferno/models/DecaEncoder.py", "snippet": "class SwinEncoder(BaseEncoder):\n\n    def __init__(self, swin_type, img_size, outsize, last_op=None):\n        self.swin_type = swin_type\n        self.img_size = img_size\n        super().__init__(outsize, last_op)\n\n    def _create_encoder(self):\n        swin_cfg = swin_cfg_from_name(self.swin_type)\n        self.encoder = create_swin_backbone(\n            swin_cfg, self.feature_size, self.img_size, load_pretrained_swin=True, pretrained_model=self.swin_type)\n\n\n    def forward_features(self, inputs):\n        pooled_feature, patches = self.encoder(inputs, include_features=True, include_patches=False)\n        return pooled_feature, patches" }, { "identifier": "Generator", "path": "inferno/models/DecaDecoder.py", "snippet": "class Generator(nn.Module):\n    def __init__(self, latent_dim=100, out_channels=1, out_scale=1, sample_mode='bilinear'):\n        super(Generator, self).__init__()\n        self.out_scale = out_scale\n\n        self.init_size = 32 // 4  # Initial size before upsampling\n        self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n        self.conv_blocks = nn.Sequential(\n            nn.BatchNorm2d(128),\n            nn.Upsample(scale_factor=2, mode=sample_mode),  # 16\n            nn.Conv2d(128, 128, 3, stride=1, padding=1),\n            nn.BatchNorm2d(128, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Upsample(scale_factor=2, mode=sample_mode),  # 32\n            nn.Conv2d(128, 64, 3, stride=1, padding=1),\n            nn.BatchNorm2d(64, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Upsample(scale_factor=2, mode=sample_mode),  # 64\n            nn.Conv2d(64, 64, 3, stride=1, padding=1),\n            nn.BatchNorm2d(64, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Upsample(scale_factor=2, mode=sample_mode),  # 128\n            nn.Conv2d(64, 32, 3, stride=1, padding=1),\n            nn.BatchNorm2d(32, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Upsample(scale_factor=2, mode=sample_mode),  # 256\n            nn.Conv2d(32, 16, 3, stride=1, padding=1),\n            nn.BatchNorm2d(16, 0.8),\n            nn.LeakyReLU(0.2, inplace=True),\n            nn.Conv2d(16, out_channels, 3, stride=1, padding=1),\n            nn.Tanh(),\n        )\n\n    def forward(self, z):\n        out = self.l1(z)\n        out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n        img = self.conv_blocks(out)\n        return img * self.out_scale" }, { "identifier": "GeneratorAdaIn", "path": "inferno/models/DecaDecoder.py", "snippet": "class GeneratorAdaIn(nn.Module):\n    def __init__(self, latent_dim, condition_dim, out_channels=1, out_scale=1, sample_mode='bilinear'):\n        super().__init__()\n        self.out_scale = out_scale\n\n        self.init_size = 32 // 4  # Initial size before upsampling\n        self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n        # self.conv_blocks = nn.Sequential(\n        #     # nn.BatchNorm2d(128),\n        #     # nn.Upsample(scale_factor=2, mode=sample_mode),  # 16\n        #     # nn.Conv2d(128, 128, 3, stride=1, padding=1),\n        #     AdaInUpConvBlock(128,128, condition_dim),\n        #     # nn.BatchNorm2d(128, 0.8),\n        #     # nn.LeakyReLU(0.2, inplace=True),\n        #     # 
nn.Upsample(scale_factor=2, mode=sample_mode), # 32\n # # nn.Conv2d(128, 64, 3, stride=1, padding=1),\n # AdaInUpConvBlock(128, 64, condition_dim),\n # # nn.BatchNorm2d(64, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 64\n # # nn.Conv2d(64, 64, 3, stride=1, padding=1),\n # AdaInUpConvBlock(64, 64, condition_dim),\n # # nn.BatchNorm2d(64, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 128\n # # nn.Conv2d(64, 32, 3, stride=1, padding=1),\n # AdaInUpConvBlock(64, 32, condition_dim),\n # # nn.BatchNorm2d(32, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 256\n # # nn.Conv2d(32, 16, 3, stride=1, padding=1),\n # AdaInUpConvBlock(32, 16, condition_dim),\n # # nn.BatchNorm2d(16, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Conv2d(16, out_channels, 3, stride=1, padding=1),\n # AdaInUpConvBlock(16, out_channels, condition_dim, scale_factor=0)\n # nn.Tanh(),\n # )\n self.conv_block1 = AdaInUpConvBlock(128,128, condition_dim, sample_mode=sample_mode) # 16\n self.conv_block2 = AdaInUpConvBlock(128, 64, condition_dim, sample_mode=sample_mode) # 32\n self.conv_block3 = AdaInUpConvBlock(64, 64, condition_dim, sample_mode=sample_mode) # 64\n self.conv_block4 = AdaInUpConvBlock(64, 32, condition_dim, sample_mode=sample_mode) # 128\n self.conv_block5 = AdaInUpConvBlock(32, 16, condition_dim, sample_mode=sample_mode) # 256\n self.conv_block6 = AdaInUpConvBlock(16, out_channels, condition_dim, scale_factor=0) # 256\n self.conv_blocks = [self.conv_block1, self.conv_block2, self.conv_block3, self.conv_block4,\n self.conv_block5, self.conv_block6]\n self.out_actv = nn.Tanh()\n\n\n def forward(self, z, cond):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n for i, block in enumerate(self.conv_blocks):\n out = block(out, cond)\n img = self.out_actv(out)\n return img * self.out_scale" }, { "identifier": "FLAME", "path": "inferno/models/DecaFLAME.py", "snippet": "class FLAME(nn.Module):\n \"\"\"\n Given flame parameters this class generates a differentiable FLAME function\n which outputs the a mesh and 2D/3D facial landmarks\n \"\"\"\n\n def __init__(self, config):\n super(FLAME, self).__init__()\n print(\"creating the FLAME Decoder\")\n with open(config.flame_model_path, 'rb') as f:\n # flame_model = Struct(**pickle.load(f, encoding='latin1'))\n ss = pickle.load(f, encoding='latin1')\n flame_model = Struct(**ss)\n\n self.cfg = config\n self.dtype = torch.float32\n self.register_buffer('faces_tensor', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))\n # The vertices of the template model\n self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))\n # The shape components and expression\n shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)\n shapedirs = torch.cat([shapedirs[:, :, :config.n_shape], shapedirs[:, :, 300:300 + config.n_exp]], 2)\n self.register_buffer('shapedirs', shapedirs)\n # The pose components\n num_pose_basis = flame_model.posedirs.shape[-1]\n posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T\n self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))\n #\n self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))\n parents = to_tensor(to_np(flame_model.kintree_table[0])).long();\n parents[0] = -1\n self.register_buffer('parents', parents)\n 
self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))\n\n        # Fixing Eyeball and neck rotation\n        default_eyball_pose = torch.zeros([1, 6], dtype=self.dtype, requires_grad=False)\n        self.register_parameter('eye_pose', nn.Parameter(default_eyball_pose,\n                                                         requires_grad=False))\n        default_neck_pose = torch.zeros([1, 3], dtype=self.dtype, requires_grad=False)\n        self.register_parameter('neck_pose', nn.Parameter(default_neck_pose,\n                                                          requires_grad=False))\n\n        # Static and Dynamic Landmark embeddings for FLAME\n        lmk_embeddings = np.load(config.flame_lmk_embedding_path, allow_pickle=True, encoding='latin1')\n        lmk_embeddings = lmk_embeddings[()]\n        self.register_buffer('lmk_faces_idx', torch.tensor(lmk_embeddings['static_lmk_faces_idx'], dtype=torch.long))\n        self.register_buffer('lmk_bary_coords',\n                             torch.tensor(lmk_embeddings['static_lmk_bary_coords'], dtype=self.dtype))\n        self.register_buffer('dynamic_lmk_faces_idx',\n                             torch.tensor(lmk_embeddings['dynamic_lmk_faces_idx'], dtype=torch.long))\n        self.register_buffer('dynamic_lmk_bary_coords',\n                             torch.tensor(lmk_embeddings['dynamic_lmk_bary_coords'], dtype=self.dtype))\n        self.register_buffer('full_lmk_faces_idx', torch.tensor(lmk_embeddings['full_lmk_faces_idx'], dtype=torch.long))\n        self.register_buffer('full_lmk_bary_coords',\n                             torch.tensor(lmk_embeddings['full_lmk_bary_coords'], dtype=self.dtype))\n\n        neck_kin_chain = []\n        NECK_IDX = 1\n        curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)\n        while curr_idx != -1:\n            neck_kin_chain.append(curr_idx)\n            curr_idx = self.parents[curr_idx]\n        self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))\n\n    def _find_dynamic_lmk_idx_and_bcoords(self, pose, dynamic_lmk_faces_idx,\n                                          dynamic_lmk_b_coords,\n                                          neck_kin_chain, dtype=torch.float32):\n        \"\"\"\n        Selects the face contour depending on the relative position of the head\n        Input:\n            vertices: N X num_of_vertices X 3\n            pose: N X full pose\n            dynamic_lmk_faces_idx: The list of contour face indexes\n            dynamic_lmk_b_coords: The list of contour barycentric weights\n            neck_kin_chain: The tree to consider for the relative rotation\n            dtype: Data type\n        return:\n            The contour face indexes and the corresponding barycentric weights\n        \"\"\"\n\n        batch_size = pose.shape[0]\n\n        aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,\n                                     neck_kin_chain)\n        rot_mats = batch_rodrigues(\n            aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)\n\n        rel_rot_mat = torch.eye(3, device=pose.device,\n                                dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)\n        for idx in range(len(neck_kin_chain)):\n            rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)\n\n        y_rot_angle = torch.round(\n            torch.clamp(rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,\n                        max=39)).to(dtype=torch.long)\n\n        neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)\n        mask = y_rot_angle.lt(-39).to(dtype=torch.long)\n        neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)\n        y_rot_angle = (neg_mask * neg_vals +\n                       (1 - neg_mask) * y_rot_angle)\n\n        dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,\n                                               0, y_rot_angle)\n        dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,\n                                              0, y_rot_angle)\n        return dyn_lmk_faces_idx, dyn_lmk_b_coords\n\n    def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):\n        \"\"\"\n        Calculates landmarks by barycentric interpolation\n        Input:\n            vertices: torch.tensor NxVx3, dtype = torch.float32\n                The tensor of input vertices\n            faces: torch.tensor (N*F)x3, dtype = torch.long\n                The faces of the mesh\n            lmk_faces_idx: 
torch.tensor N X L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the\n landmarks.\n lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32\n The tensor of barycentric coordinates that are used to interpolate\n the landmarks\n\n Returns:\n landmarks: torch.tensor NxLx3, dtype = torch.float32\n The coordinates of the landmarks for each mesh in the batch\n \"\"\"\n # Extract the indices of the vertices for each face\n # NxLx3\n batch_size, num_verts = vertices.shape[:2]\n lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(\n 1, -1, 3).view(batch_size, lmk_faces_idx.shape[1], -1)\n\n lmk_faces += torch.arange(batch_size, dtype=torch.long).view(-1, 1, 1).to(\n device=vertices.device) * num_verts\n\n lmk_vertices = vertices.view(-1, 3)[lmk_faces]\n landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])\n return landmarks\n\n def _vertices2landmarks2d(self, vertices, full_pose):\n \"\"\"\n Calculates landmarks by barycentric interpolation\n Input:\n vertices: torch.tensor NxVx3, dtype = torch.float32\n The tensor of input vertices\n full_pose: torch.tensor N X 12, dtype = torch.float32\n The tensor with global pose, neck pose, jaw pose and eye pose (respectively) in axis angle format\n\n Returns:\n landmarks: torch.tensor NxLx3, dtype = torch.float32\n The coordinates of the landmarks for each mesh in the batch\n \"\"\"\n # Extract the indices of the vertices for each face\n # NxLx3\n batch_size = vertices.shape[0]\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)\n\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(\n full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain, dtype=self.dtype)\n lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)\n\n landmarks2d = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n return landmarks2d\n\n\n def seletec_3d68(self, vertices):\n landmarks3d = vertices2landmarks(vertices, self.faces_tensor,\n self.full_lmk_faces_idx.repeat(vertices.shape[0], 1),\n self.full_lmk_bary_coords.repeat(vertices.shape[0], 1, 1))\n return landmarks3d\n\n def forward(self, shape_params=None, expression_params=None, pose_params=None, eye_pose_params=None):\n \"\"\"\n Input:\n shape_params: N X number of shape parameters\n expression_params: N X number of expression parameters\n pose_params: N X number of pose parameters (6)\n return:d\n vertices: N X V X 3\n landmarks: N X number of landmarks X 3\n \"\"\"\n batch_size = shape_params.shape[0]\n if pose_params is None:\n pose_params = self.eye_pose.expand(batch_size, -1) # TODO: is this correct?\n if eye_pose_params is None:\n eye_pose_params = self.eye_pose.expand(batch_size, -1)\n if expression_params is None:\n expression_params = torch.zeros(batch_size, self.cfg.n_exp).to(shape_params.device)\n\n betas = torch.cat([shape_params, expression_params], dim=1)\n full_pose = torch.cat(\n [pose_params[:, :3], self.neck_pose.expand(batch_size, -1), pose_params[:, 3:], eye_pose_params], dim=1)\n template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)\n\n vertices, _ = lbs(betas, full_pose, template_vertices,\n self.shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, dtype=self.dtype, \n 
detach_pose_correctives=False)\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)\n\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(\n full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain, dtype=self.dtype)\n lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)\n\n landmarks2d = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n bz = vertices.shape[0]\n landmarks3d = vertices2landmarks(vertices, self.faces_tensor,\n self.full_lmk_faces_idx.repeat(bz, 1),\n self.full_lmk_bary_coords.repeat(bz, 1, 1))\n\n return vertices, landmarks2d, landmarks3d" }, { "identifier": "FLAMETex", "path": "inferno/models/DecaFLAME.py", "snippet": "class FLAMETex(nn.Module):\n \"\"\"\n current FLAME texture:\n https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64\n tex_path: '/ps/scratch/yfeng/Data/FLAME/texture/albedoModel2020_FLAME_albedoPart.npz'\n ## adapted from BFM\n tex_path: '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_albedo_from_BFM.npz'\n \"\"\"\n\n def __init__(self, config):\n super(FLAMETex, self).__init__()\n if config.tex_type == 'BFM':\n mu_key = 'MU'\n pc_key = 'PC'\n n_pc = 199\n tex_path = config.tex_path\n try:\n tex_space = np.load(tex_path)\n texture_mean = tex_space[mu_key].reshape(1, -1)\n texture_basis = tex_space[pc_key].reshape(-1, n_pc)\n except FileNotFoundError as e: \n im_size = 512 \n texture_mean = np.ones((1, im_size*im_size*3)) * 0.5\n texture_basis = np.eye(im_size*im_size*3, n_pc) * 0.5\n print(\"[WARNING] texture file not found. 
Setting texture space with dummy values.\")\n\n elif config.tex_type == 'FLAME':\n mu_key = 'mean'\n pc_key = 'tex_dir'\n n_pc = 200\n tex_path = config.tex_path\n tex_space = np.load(tex_path)\n texture_mean = tex_space[mu_key].reshape(1, -1) / 255.\n texture_basis = tex_space[pc_key].reshape(-1, n_pc) / 255.\n\n else:\n print('texture type \"', config.tex_type, '\" does not exist!')\n raise NotImplementedError('texture type \"', config.tex_type, '\" does not exist!')\n\n n_tex = config.n_tex\n num_components = texture_basis.shape[1]\n texture_mean = torch.from_numpy(texture_mean).float()[None, ...]\n texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...]\n self.register_buffer('texture_mean', texture_mean)\n self.register_buffer('texture_basis', texture_basis)\n\n def forward(self, texcode):\n texture = self.texture_mean + (self.texture_basis * texcode[:, None, :]).sum(-1)\n texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)\n texture = F.interpolate(texture, [256, 256])\n texture = texture[:, [2, 1, 0], :, :]\n return texture" }, { "identifier": "FLAME_mediapipe", "path": "inferno/models/DecaFLAME.py", "snippet": "class FLAME_mediapipe(FLAME): \n\n def __init__(self, config):\n super().__init__(config)\n # static MEDIAPIPE landmark embeddings for FLAME\n lmk_embeddings_mediapipe = np.load(config.flame_mediapipe_lmk_embedding_path, \n allow_pickle=True, encoding='latin1')\n # indices = lmk_embeddings_mediapipe['landmark_indices']\n self.register_buffer('lmk_faces_idx_mediapipe', \n torch.tensor(lmk_embeddings_mediapipe['lmk_face_idx'].astype(np.int64), dtype=torch.long))\n self.register_buffer('lmk_bary_coords_mediapipe',\n torch.tensor(lmk_embeddings_mediapipe['lmk_b_coords'], dtype=self.dtype))\n \n def forward(self, shape_params=None, expression_params=None, pose_params=None, eye_pose_params=None):\n vertices, landmarks2d, landmarks3d = super().forward(shape_params, expression_params, pose_params, eye_pose_params)\n batch_size = shape_params.shape[0]\n lmk_faces_idx_mediapipe = self.lmk_faces_idx_mediapipe.unsqueeze(dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords_mediapipe = self.lmk_bary_coords_mediapipe.unsqueeze(dim=0).expand(batch_size, -1, -1).contiguous()\n landmarks2d_mediapipe = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx_mediapipe,\n lmk_bary_coords_mediapipe )\n # landmarks3d_mediapipe = vertices2landmarks(vertices, self.faces_tensor,\n # self.full_lmk_faces_idx_mediapipe.repeat(bz, 1),\n # self.full_lmk_bary_coords_mediapipe.repeat(bz, 1, 1))\n\n return vertices, landmarks2d, landmarks3d, landmarks2d_mediapipe#, landmarks3d_mediapipe" }, { "identifier": "EmotionMLP", "path": "inferno/models/EmotionMLP.py", "snippet": "class EmotionMLP(torch.nn.Module):\n\n def __init__(self, config, deca_cfg):\n super().__init__()\n self.config = config\n in_size = 0\n if self.config.use_identity:\n in_size += deca_cfg.n_shape\n if self.config.use_expression:\n in_size += deca_cfg.n_exp\n if self.config.use_global_pose:\n in_size += 3\n if self.config.use_jaw_pose:\n in_size += 3\n if self.config.use_detail_code:\n self.n_detail = deca_cfg.n_detail\n in_size += deca_cfg.n_detail\n else:\n self.n_detail = None\n if 'use_detail_emo_code' in self.config.keys() and self.config.use_detail_emo_code:\n self.n_detail_emo = deca_cfg.n_detail_emo\n in_size += deca_cfg.n_detail_emo\n else:\n self.n_detail_emo = None\n\n hidden_layer_sizes = config.num_mlp_layers * [in_size]\n\n out_size = 0\n if 
self.config.predict_expression:\n self.num_classes = self.config.data.n_expression if 'n_expression' in self.config.data.keys() else 9\n out_size += self.num_classes\n if self.config.predict_valence:\n out_size += 1\n if self.config.predict_arousal:\n out_size += 1\n\n # if \"use_mlp\" not in self.config.keys() or self.config.use_mlp:\n if 'mlp_norm_layer' in self.config.keys():\n batch_norm = class_from_str(self.config.mlp_norm_layer, sys.modules[__name__])\n else:\n batch_norm = None\n self.mlp = MLP(in_size, out_size, hidden_layer_sizes, batch_norm=batch_norm)\n # else:\n # self.mlp = None\n\n if 'v_activation' in config.keys():\n self.v_activation = class_from_str(self.config.v_activation, sys.modules[__name__])\n else:\n self.v_activation = None\n\n if 'a_activation' in config.keys():\n self.a_activation = class_from_str(self.config.a_activation, sys.modules[__name__])\n else:\n self.a_activation = None\n\n if 'exp_activation' in config.keys():\n self.exp_activation = class_from_str(self.config.exp_activation, sys.modules[__name__])\n else:\n self.exp_activation = F.log_softmax\n\n self.va_loss = loss_from_cfg(config, 'va_loss')\n self.v_loss = loss_from_cfg(config, 'v_loss')\n self.a_loss = loss_from_cfg(config, 'a_loss')\n self.exp_loss = loss_from_cfg(config, 'exp_loss')\n\n # config backwards compatibility\n self.config = add_cfg_if_missing(self.config, 'detach_shape', False)\n self.config = add_cfg_if_missing(self.config, 'detach_expression', False)\n self.config = add_cfg_if_missing(self.config, 'detach_detailcode', False)\n self.config = add_cfg_if_missing(self.config, 'detach_jaw', False)\n self.config = add_cfg_if_missing(self.config, 'detach_global_pose', False)\n\n\n def forward(self, values, result_prefix=\"\"):\n shapecode = values['shapecode']\n\n if self.config.detach_shape:\n shapecode = shapecode.detach()\n\n # texcode = values['texcode']\n expcode = values['expcode']\n\n if self.config.detach_expression:\n expcode = expcode.detach()\n\n posecode = values['posecode']\n if self.config.use_detail_code:\n if 'detailcode' in values.keys() and values['detailcode'] is not None:\n detailcode = values['detailcode']\n if self.config.detach_detailcode:\n detailcode = detailcode.detach()\n else:\n detailcode = torch.zeros((posecode.shape[0], self.n_detail), dtype=posecode.dtype, device=posecode.device )\n else:\n detailcode = None\n\n if 'use_detailemo_code' in self.config.keys() and self.config.use_detailemo_code:\n if 'detailemocode' in values.keys() and values['detailemocode'] is not None:\n detailemocode = values['detailemocode']\n if 'detach_detailemocode' in self.config.keys() and self.config.detach_detailemocode:\n detailemocode = detailemocode.detach()\n else:\n detailemocode = torch.zeros((posecode.shape[0], self.n_detail_emo), dtype=posecode.dtype, device=posecode.device )\n else:\n detailemocode = None\n\n\n global_pose = posecode[:, :3]\n if self.config.detach_global_pose:\n global_pose = global_pose.detach()\n\n jaw_pose = posecode[:, 3:]\n if self.config.detach_jaw:\n jaw_pose = jaw_pose.detach()\n\n input_list = []\n\n if self.config.use_identity:\n input_list += [shapecode]\n\n if self.config.use_expression:\n input_list += [expcode]\n\n if self.config.use_global_pose:\n input_list += [global_pose]\n\n if self.config.use_jaw_pose:\n input_list += [jaw_pose]\n\n if self.config.use_detail_code:\n input_list += [detailcode]\n\n if 'use_detail_emo_code' in self.config.keys() and self.config.use_detail_emo_code:\n input_list += [detailemocode]\n\n input = 
torch.cat(input_list, dim=1)\n output = self.mlp(input)\n\n out_idx = 0\n if self.config.predict_expression:\n expr_classification = output[:, out_idx:(out_idx + self.num_classes)]\n if self.exp_activation is not None:\n expr_classification = self.exp_activation(output[:, out_idx:(out_idx + self.num_classes)], dim=1)\n out_idx += self.num_classes\n else:\n expr_classification = None\n\n if self.config.predict_valence:\n valence = output[:, out_idx:(out_idx+1)]\n if self.v_activation is not None:\n valence = self.v_activation(valence)\n out_idx += 1\n else:\n valence = None\n\n if self.config.predict_arousal:\n arousal = output[:, out_idx:(out_idx+1)]\n if self.a_activation is not None:\n arousal = self.a_activation(output[:, out_idx:(out_idx + 1)])\n out_idx += 1\n else:\n arousal = None\n\n values[result_prefix + \"valence\"] = valence\n values[result_prefix + \"arousal\"] = arousal\n values[result_prefix + \"expr_classification\"] = expr_classification\n\n return values\n\n\n def compute_loss(self, pred, batch, training, pred_prefix=\"\"):\n valence_gt = pred[\"va\"][:, 0:1]\n arousal_gt = pred[\"va\"][:, 1:2]\n expr_classification_gt = pred[\"affectnetexp\"]\n if \"expression_weight\" in pred.keys():\n class_weight = pred[\"expression_weight\"][0]\n else:\n class_weight = None\n\n gt = {}\n gt[\"valence\"] = valence_gt\n gt[\"arousal\"] = arousal_gt\n gt[\"expr_classification\"] = expr_classification_gt\n\n # TODO: this is not ugly enough\n scheme = None if 'va_loss_scheme' not in self.config.keys() else self.config.va_loss_scheme\n loss_term_weights = _get_step_loss_weights(self.v_loss, self.a_loss, self.va_loss, scheme, training)\n\n valence_sample_weight = batch[\"valence_sample_weight\"] if \"valence_sample_weight\" in batch.keys() else None\n arousal_sample_weight = batch[\"arousal_sample_weight\"] if \"arousal_sample_weight\" in batch.keys() else None\n va_sample_weight = batch[\"va_sample_weight\"] if \"va_sample_weight\" in batch.keys() else None\n expression_sample_weight = batch[\"expression_sample_weight\"] if \"expression_sample_weight\" in batch.keys() else None\n\n if 'continuous_va_balancing' in self.config.keys():\n if self.config.continuous_va_balancing == '1d':\n v_weight = valence_sample_weight\n a_weight = arousal_sample_weight\n elif self.config.continuous_va_balancing == '2d':\n v_weight = va_sample_weight\n a_weight = va_sample_weight\n elif self.config.continuous_va_balancing == 'expr':\n v_weight = expression_sample_weight\n a_weight = expression_sample_weight\n else:\n raise RuntimeError(f\"Invalid continuous affect balancing\"\n f\" '{self.config.continuous_va_balancing}'\")\n if len(v_weight.shape) > 1:\n v_weight = v_weight.view(-1)\n if len(a_weight.shape) > 1:\n a_weight = a_weight.view(-1)\n else:\n v_weight = None\n a_weight = None\n\n losses, metrics = {}, {}\n # print(metrics.keys())\n losses, metrics = v_or_a_loss(self.v_loss, pred, gt, loss_term_weights, metrics, losses, \"valence\",\n pred_prefix=pred_prefix, permit_dropping_corr=not training,\n sample_weights=v_weight)\n losses, metrics = v_or_a_loss(self.a_loss, pred, gt, loss_term_weights, metrics, losses, \"arousal\",\n pred_prefix=pred_prefix, permit_dropping_corr=not training,\n sample_weights=a_weight)\n losses, metrics = va_loss(self.va_loss, pred, gt, loss_term_weights, metrics, losses,\n pred_prefix=pred_prefix, permit_dropping_corr=not training)\n losses, metrics = exp_loss(self.exp_loss, pred, gt, class_weight, metrics, losses,\n self.config.expression_balancing, self.num_classes, 
pred_prefix=pred_prefix, )\n\n return losses, metrics" }, { "identifier": "Expression7", "path": "inferno/datasets/AffWild2Dataset.py", "snippet": "class Expression7(Enum):\n Neutral = 0\n Anger = 1\n Disgust = 2\n Fear = 3\n Happiness = 4\n Sadness = 5\n Surprise = 6\n None_ = 7" }, { "identifier": "AffectNetExpressions", "path": "inferno/datasets/AffectNetDataModule.py", "snippet": "class AffectNetExpressions(Enum):\n Neutral = 0\n Happy = 1\n Sad = 2\n Surprise = 3\n Fear = 4\n Disgust = 5\n Anger = 6\n Contempt = 7\n None_ = 8\n Uncertain = 9\n Occluded = 10\n xxx = 11\n\n\n @staticmethod\n def from_str(string : str):\n string = string[0].upper() + string[1:]\n return AffectNetExpressions[string]\n\n # _expressions = {0: 'neutral', 1:'happy', 2:'sad', 3:'surprise', 4:'fear', 5:'disgust', 6:'anger', 7:'contempt', 8:'none'}" }, { "identifier": "_log_array_image", "path": "inferno/utils/lightning_logging.py", "snippet": "def _log_array_image(path, image, caption=None):\n image = _fix_image(image)\n if path is not None:\n imsave(path, image)\n return image" }, { "identifier": "_log_wandb_image", "path": "inferno/utils/lightning_logging.py", "snippet": "def _log_wandb_image(path, image, caption=None):\n path.parent.mkdir(parents=True, exist_ok=True)\n image = _fix_image(image)\n imsave(path, image)\n if caption is not None:\n caption_file = Path(path).parent / (Path(path).stem + \".txt\")\n with open(caption_file, \"w\") as f:\n f.write(caption)\n wandb_image = Image(str(path), caption=caption)\n return wandb_image" }, { "identifier": "_torch_image2np", "path": "inferno/utils/lightning_logging.py", "snippet": "def _torch_image2np(torch_image):\n image = torch_image.detach().cpu().numpy()\n if len(image.shape) == 4:\n image = image.transpose([0, 2, 3, 1])\n elif len(image.shape) == 3:\n image = image.transpose([1, 2, 0])\n return image" }, { "identifier": "class_from_str", "path": "inferno/utils/other.py", "snippet": "def class_from_str(str, module=None, none_on_fail = False) -> type:\n if module is None:\n module = sys.modules[__name__]\n if hasattr(module, str):\n cl = getattr(module, str)\n return cl\n elif str.lower() == 'none' or none_on_fail:\n return None\n raise RuntimeError(f\"Class '{str}' not found.\")" }, { "identifier": "get_path_to_assets", "path": "inferno/utils/other.py", "snippet": "def get_path_to_assets() -> Path:\n import inferno\n return Path(inferno.__file__).parents[1] / \"assets\"" }, { "identifier": "VGG19Loss", "path": "inferno/layers/losses/VGGLoss.py", "snippet": "class VGG19Loss(nn.Module):\n\n def __init__(self, layer_activation_indices_weights, diff=torch.nn.functional.l1_loss, batch_norm=False):\n super().__init__()\n self.batch_norm = batch_norm\n self.vgg19 = VGG19(sorted(layer_activation_indices_weights.keys()), batch_norm=batch_norm)\n self.layer_activation_indices_weights = layer_activation_indices_weights\n self.diff = diff\n\n def forward(self, x, y):\n feat_x = self.vgg19(x)\n feat_y = self.vgg19(y)\n\n out = {}\n loss = 0\n for idx, weight in self.layer_activation_indices_weights.items():\n d = self.diff(feat_x[idx], feat_y[idx])\n out[idx] = d\n loss += d*weight\n return loss, out" }, { "identifier": "EmoNetRegressor", "path": "inferno/models/EmoNetRegressor.py", "snippet": "class EmoNetRegressor(torch.nn.Module):\n\n def __init__(self, outsize, last_op=None):\n super().__init__()\n self.emonet = get_emonet().eval()\n # self.emonet.eval()\n # self.emonet = self.emonet.requires_grad_(False)\n # self.transforms = Resize((256, 256))\n 
self.input_image_size = (256, 256)  # for now, emonet is pretrained for this particular image size (the official impl)\n\n        self.feature_to_use = 'emo_feat_2'\n\n        if self.feature_to_use == 'emo_feat_2':\n            self.emonet_feature_size = 256\n            self.fc_size = 256\n        else:\n            raise NotImplementedError(f\"Not yet implemented for feature '{self.feature_to_use}'\")\n\n        self.layers = torch.nn.Sequential(\n            torch.nn.Linear(self.emonet_feature_size, self.fc_size),\n            torch.nn.ReLU(),\n            torch.nn.Linear(self.fc_size, outsize)\n        )\n        self.last_op = last_op\n\n    def forward(self, images):\n        images = F.interpolate(images, self.input_image_size, mode='bilinear')\n        out = self.emonet(images, intermediate_features=True)\n        # out has the following keys: 'heatmap', 'expression', 'valence', 'arousal', 'emo_feat', 'emo_feat_2'\n        out = self.layers(out[self.feature_to_use])\n        return out\n\n    def reset_last_layer(self):\n        # initialize the last layer to zero to help the network \n        # predict the initial pose a bit more stable\n        torch.nn.init.constant_(self.layers[-1].weight, 0)\n        torch.nn.init.constant_(self.layers[-1].bias, 0)" }, { "identifier": "EmonetRegressorStatic", "path": "inferno/models/EmoNetRegressor.py", "snippet": "class EmonetRegressorStatic(EmoNetRegressor):\n\n    def __init__(self, outsize, last_op=None):\n        super().__init__(outsize, last_op)\n        self.emonet.requires_grad_(False)\n        self.emonet.eval()\n\n    def train(self, mode=True):\n        # this one only trains the FC layers\n        self.emonet.eval()\n        self.layers.train(mode)\n        return self\n\n\n    def reset_last_layer(self):\n        # initialize the last layer to zero to help the network \n        # predict the initial pose a bit more stable\n        torch.nn.init.constant_(self.layers[-1].weight, 0)\n        torch.nn.init.constant_(self.layers[-1].bias, 0)" } ]
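Several of the context entries above are factory functions (`create_emo_loss`, `create_au_loss`) that DECA.py calls while wiring up its perceptual losses. As a rough orientation, here is a hedged usage sketch of `create_emo_loss`; the device string and the relative checkpoint directory "EMOCA/emonet" are made-up placeholders (a real path must contain a trained emotion network and is resolved under `get_path_to_assets()` when not absolute), so treat this as an assumption-laden illustration rather than a verified invocation:

from inferno.layers.losses.EmoNetLoss import create_emo_loss

# Resolve and wrap an emotion network as a perceptual loss. A relative path is
# looked up under the assets directory (see create_emo_loss in the snippet above).
emo_loss = create_emo_loss(
    "cuda",                       # device the loss network is moved to
    emoloss="EMOCA/emonet",       # hypothetical checkpoint directory
    trainable=False,              # keep the perceptual backbone frozen
    dual=False,                   # single-branch loss (no trainable clone)
    normalize_features=False,
    emo_feat_loss="l1_loss",      # assumed string spec for the feature distance
)

Passing `emoloss=None` instead falls back to the bundled EmoNet via `EmoNetLoss(device)`, which is the simplest configuration supported by the snippet above.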
import os, sys import torch import torchvision import torch.nn.functional as F import torchvision.transforms.functional as F_v import numpy as np import cv2 import inferno.layers.losses.DecaLosses as lossfunc import inferno.layers.losses.MediaPipeLandmarkLosses as lossfunc_mp import inferno.utils.DecaUtils as util import pytorch_lightning.plugins.environments.lightning_environment as le import psutil import adabound import copy from pytorch_lightning import LightningModule from pytorch_lightning.loggers import WandbLogger from inferno.layers.losses.EmoNetLoss import EmoNetLoss, create_emo_loss, create_au_loss from skimage.io import imread from skimage.transform import resize from pathlib import Path from inferno.models.Renderer import SRenderY from inferno.models.DecaEncoder import ResnetEncoder, SecondHeadResnet, SwinEncoder from inferno.models.DecaDecoder import Generator, GeneratorAdaIn from inferno.models.DecaFLAME import FLAME, FLAMETex, FLAME_mediapipe from inferno.models.EmotionMLP import EmotionMLP from inferno.datasets.AffWild2Dataset import Expression7 from inferno.datasets.AffectNetDataModule import AffectNetExpressions from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np from enum import Enum from inferno.utils.other import class_from_str, get_path_to_assets from inferno.layers.losses.VGGLoss import VGG19Loss from omegaconf import OmegaConf, open_dict from inferno.models.temporal.external.LipReadingLoss import LipReadingLoss from .StarGAN import StarGANWrapper from inferno.models.EmoNetRegressor import EmoNetRegressor, EmonetRegressorStatic from .mica.config import get_cfg_defaults from .mica.mica import MICA from .mica.MicaInputProcessing import MicaInputProcessor from inferno.utils.other import get_path_to_assets from inferno.models.IO import locate_checkpoint
19,545
        uv_z = uv_z * self.uv_face_eye_mask

        # detail vertices = coarse vertices + predicted displacement*normals + fixed displacement*normals
        uv_detail_vertices = uv_coarse_vertices + \
                             uv_z * uv_coarse_normals + \
                             self.fixed_uv_dis[None, None, :, :] * uv_coarse_normals  # .detach()
        dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3])
        uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1))
        uv_detail_normals = uv_detail_normals.reshape(
            [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2)
        # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask)
        # uv_detail_normals = util.gaussian_blur(uv_detail_normals)
        return uv_detail_normals, uv_coarse_vertices

    def visualize(self, visdict, savepath, catdim=1):
        grids = {}
        for key in visdict:
            # print(key)
            if visdict[key] is None:
                continue
            grids[key] = torchvision.utils.make_grid(
                F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu()
        grid = torch.cat(list(grids.values()), catdim)
        grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]]
        grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8)
        if savepath is not None:
            cv2.imwrite(savepath, grid_image)
        return grid_image

    def create_mesh(self, opdict, dense_template):
        '''
        vertices: [nv, 3], tensor
        texture: [3, h, w], tensor
        '''
        i = 0
        vertices = opdict['verts'][i].cpu().numpy()
        faces = self.render.faces[0].cpu().numpy()
        if 'uv_texture_gt' in opdict.keys():
            texture = util.tensor2image(opdict['uv_texture_gt'][i])
        else:
            texture = None
        uvcoords = self.render.raw_uvcoords[0].cpu().numpy()
        uvfaces = self.render.uvfaces[0].cpu().numpy()
        # save coarse mesh, with texture and normal map
        if 'uv_detail_normals' in opdict.keys():
            normal_map = util.tensor2image(opdict['uv_detail_normals'][i] * 0.5 + 0.5)
            # upsample mesh, save detailed mesh
            texture = texture[:, :, [2, 1, 0]]
            normals = opdict['normals'][i].cpu().numpy()
            displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze()
            dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces,
                                                                           displacement_map, texture, dense_template)
        else:
            normal_map = None
            dense_vertices = None
            dense_colors = None
            dense_faces = None

        return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors

    def save_obj(self, filename, opdict, dense_template, mode='detail'):
        if mode not in ['coarse', 'detail', 'both']:
            raise ValueError(f"Invalid mode '{mode}'. Expected modes are: 'coarse', 'detail', 'both'")

        vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \
            = self.create_mesh(opdict, dense_template)

        if mode == 'both':
            if isinstance(filename, list):
                filename_coarse = filename[0]
                filename_detail = filename[1]
            else:
                filename_coarse = filename
                filename_detail = filename.replace('.obj', '_detail.obj')
        elif mode == 'coarse':
            filename_coarse = filename
        else:
            filename_detail = filename

        if mode in ['coarse', 'both']:
            util.write_obj(str(filename_coarse), vertices, faces,
                           texture=texture,
                           uvcoords=uvcoords,
                           uvfaces=uvfaces,
                           normal_map=normal_map)

        if mode in ['detail', 'both']:
            util.write_obj(str(filename_detail),
                           dense_vertices,
                           dense_faces,
                           colors=dense_colors,
                           inverse_face_order=True)


class ExpDECAInterface(object):
    """
    This serves as an interface for EMOCA-like classes that need to use a different sub class
    but retain the EMOCA functionality. See EMICA_v2 for an example.
    """

    def _create_model(self):
        # E_flame should be fixed for expression EMOCA
        self.E_flame.requires_grad_(False)

        # 2) add expression decoder
        if self.config.expression_backbone == 'deca_parallel':
            ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone.
            ## (Only the second FC head is trainable)
            self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same')
        elif self.config.expression_backbone == 'deca_clone':
            ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable)
            ## - This is in final EMOCA.
            # TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well.
            self.E_expression = ResnetEncoder(self.n_exp_param)
            # clone parameters of the ResNet
            self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict())
        elif self.config.expression_backbone == 'emonet_trainable':
            # Trainable EmoNet instead of Resnet (deprecated)
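The `uv_detail_normals` computation at the top of `cropped_code` turns the decoded displacement map into a dense mesh and then calls the repo helper `util.vertex_normals` (imported from `inferno.utils.DecaUtils` as `util`). That helper is not shown in this record, so the following is a minimal, self-contained sketch of the standard area-weighted vertex-normal computation it presumably performs; the function name is hypothetical and the code is illustrative, not the repository's implementation:

import torch
import torch.nn.functional as F

def vertex_normals_sketch(vertices: torch.Tensor, faces: torch.Tensor) -> torch.Tensor:
    # vertices: [B, V, 3] float, faces: [B, F, 3] long -> per-vertex normals [B, V, 3]
    normals = torch.zeros_like(vertices)
    for b in range(vertices.shape[0]):
        tri = vertices[b][faces[b]]                          # [F, 3, 3] triangle corners
        face_n = torch.cross(tri[:, 1] - tri[:, 0],
                             tri[:, 2] - tri[:, 0], dim=-1)  # unnormalized face normals
        # scatter each face normal onto its three vertices; leaving the cross
        # product unnormalized weights every face by its area
        normals[b].index_add_(0, faces[b].reshape(-1),
                              face_n.repeat_interleave(3, dim=0))
    return F.normalize(normals, dim=-1)

Accumulating unnormalized cross products before the final normalization is the usual design choice here: larger triangles contribute more to the smoothed normal, which behaves well on the unevenly sized triangles that a UV-gridded face mesh produces.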
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at [email protected] # For commercial licensing contact, please contact [email protected] Parts of the code were adapted from the original DECA release: https://github.com/YadiraF/DECA/ """ # from time import time torch.backends.cudnn.benchmark = True class DecaMode(Enum): COARSE = 1 # when switched on, only coarse part of DECA-based networks is used DETAIL = 2 # when switched on, only coarse and detail part of DECA-based networks is used class DecaModule(LightningModule): """ DecaModule is a PL module that implements DECA-inspired face reconstruction networks. """ def __init__(self, model_params, learning_params, inout_params, stage_name = ""): """ :param model_params: a DictConfig of parameters about the model itself :param learning_params: a DictConfig of parameters corresponding to the learning process (such as optimizer, lr and others) :param inout_params: a DictConfig of parameters about input and output (where checkpoints and visualizations are saved) """ super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MPL regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable. 
    def _init_emotion_loss(self):
        """ Initialize the emotion perceptual loss (used for EMOCA supervision) """
        if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)):
            if self.emonet_loss is not None:
                emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False
                if self.emonet_loss.is_trainable():
                    if not emoloss_force_override:
                        print("The old emonet loss is trainable and will not be overridden or replaced.")
                        return
                        # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably not "
                        #                           "what you want implicitly. If you need this, use the 'emoloss_force_override' config.")
                    else:
                        print("The old emonet loss is trainable but override is set so it will be replaced.")
                else:
                    print("The old emonet loss is not trainable. It will be replaced.")

            if 'emonet_model_path' in self.deca.config.keys():
                emonet_model_path = self.deca.config.emonet_model_path
            else:
                emonet_model_path = None
            # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path)
            emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False
            emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False
            normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None
            emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None
            old_emonet_loss = self.emonet_loss

            self.emonet_loss = create_emo_loss(self.device, emoloss=emonet_model_path, trainable=emoloss_trainable,
                                               dual=emoloss_dual,
                                               normalize_features=normalize_features,
                                               emo_feat_loss=emo_feat_loss)

            if old_emonet_loss is not None and type(old_emonet_loss) != type(self.emonet_loss):
                print(f"The old emonet loss {old_emonet_loss.__class__.__name__} is replaced during reconfiguration by "
                      f"the new emotion loss {self.emonet_loss.__class__.__name__}")
        else:
            self.emonet_loss = None

    def _init_au_loss(self):
        """ Initialize the AU perceptual loss (not currently used in EMOCA) """
        if 'au_loss' in self.deca.config.keys():
            if self.au_loss is not None:
                force_override = True if 'force_override' in self.deca.config.au_loss.keys() \
                                         and self.deca.config.au_loss.force_override else False
                if self.au_loss.is_trainable():
                    if not force_override:
                        print("The old AU loss is trainable and will not be overridden or replaced.")
                        return
                    else:
                        print("The old AU loss is trainable but override is set so it will be replaced.")
                else:
                    print("The old AU loss is not trainable. It will be replaced.")
            old_au_loss = self.au_loss
            self.au_loss = create_au_loss(self.device, self.deca.config.au_loss)
        else:
            self.au_loss = None

    def _init_lipread_loss(self):
        """ Initialize the lip reading perceptual loss (not currently used in the original EMOCA) """
        if 'lipread_loss' in self.deca.config.keys() and self.deca.config.lipread_loss.get('load', True):
            if self.lipread_loss is not None:
                force_override = True if 'force_override' in self.deca.config.lipread_loss.keys() \
                                         and self.deca.config.lipread_loss.force_override else False
                assert not self.lipread_loss.is_trainable(), "Trainable lip reading loss is not supported yet."
                if self.lipread_loss.is_trainable():
                    if not force_override:
                        print("The old lip reading loss is trainable and will not be overridden or replaced.")
                        return
                    else:
                        print("The old lip reading loss is trainable but override is set so it will be replaced.")
                else:
                    print("The old lip reading loss is not trainable. It will be replaced.")
            # old_lipread_loss = self.emonet_loss
            self.lipread_loss = LipReadingLoss(self.device, self.deca.config.lipread_loss.lipread_loss)
            self.lipread_loss.eval()
            self.lipread_loss.requires_grad_(False)
        else:
            self.lipread_loss = None
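    # NOTE (illustrative sketch, not part of the original module): the freezing pattern used in
    # `_init_lipread_loss` above -- `eval()` switches off dropout/batch-norm updates and
    # `requires_grad_(False)` detaches the parameters from optimization -- turns the loss network
    # into a fixed feature extractor:
    #
    #   >>> import torch
    #   >>> net = torch.nn.Linear(4, 2)              # stand-in for a pretrained loss network
    #   >>> net = net.eval().requires_grad_(False)
    #   >>> net(torch.randn(1, 4)).requires_grad     # no gradient graph through frozen weights
    #   False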
    def reconfigure(self, model_params, inout_params, learning_params, stage_name="", downgrade_ok=False, train=True):
        """
        Reconfigure the model. Usually used to switch between detail and coarse stages (which have separate configs).
        """
        if (self.mode == DecaMode.DETAIL and model_params.mode != DecaMode.DETAIL) and not downgrade_ok:
            raise RuntimeError("You're switching the EMOCA mode from DETAIL to COARSE. Is this really what you want?!")
        self.inout_params = inout_params
        self.learning_params = learning_params

        if self.deca.__class__.__name__ != model_params.deca_class:
            old_deca_class = self.deca.__class__.__name__
            state_dict = self.deca.state_dict()
            if 'deca_class' in model_params.keys():
                deca_class = class_from_str(model_params.deca_class, sys.modules[__name__])
            else:
                deca_class = DECA
            self.deca = deca_class(config=model_params)

            diff = set(state_dict.keys()).difference(set(self.deca.state_dict().keys()))
            if len(diff) > 0:
                raise RuntimeError(f"Some values from old state dict will not be used. This is probably not what you "
                                   f"want because it most likely means that the pretrained model's weights won't be used. "
                                   f"Maybe you messed up backbone compatibility (i.e. SWIN vs ResNet?) {diff}")
            ret = self.deca.load_state_dict(state_dict, strict=False)
            if len(ret.unexpected_keys) > 0:
                raise RuntimeError(f"Unexpected keys: {ret.unexpected_keys}")
            missing_modules = set([s.split(".")[0] for s in ret.missing_keys])
            print(f"Missing modules when upgrading from {old_deca_class} to {model_params.deca_class}:")
            print(missing_modules)
        else:
            self.deca._reconfigure(model_params)

        self._init_emotion_loss()
        self._init_au_loss()

        self.stage_name = stage_name
        if self.stage_name is None:
            self.stage_name = ""
        if len(self.stage_name) > 0:
            self.stage_name += "_"
        self.mode = DecaMode[str(model_params.mode).upper()]
        self.train(mode=train)
        print(f"EMOCA MODE RECONFIGURED TO: {self.mode}")

        if 'shape_constrain_type' in self.deca.config.keys() and str(self.deca.config.shape_constrain_type).lower() != 'none':
            shape_constraint = self.deca.config.shape_constrain_type
        else:
            shape_constraint = None
        if 'expression_constrain_type' in self.deca.config.keys() and str(self.deca.config.expression_constrain_type).lower() != 'none':
            expression_constraint = self.deca.config.expression_constrain_type
        else:
            expression_constraint = None

        if shape_constraint is not None and expression_constraint is not None:
            raise ValueError("Both shape constraint and expression constraint are active. 
This is probably not what we want.") def uses_texture(self): """ Check if the model uses texture """ return self.deca.uses_texture() def visualize(self, visdict, savepath, catdim=1): return self.deca.visualize(visdict, savepath, catdim) def train(self, mode: bool = True): # super().train(mode) # not necessary self.deca.train(mode) if self.emotion_mlp is not None: self.emotion_mlp.train(mode) if self.emonet_loss is not None: self.emonet_loss.eval() if self.deca.perceptual_loss is not None: self.deca.perceptual_loss.eval() if self.deca.id_loss is not None: self.deca.id_loss.eval() return self def to(self, *args, **kwargs): super().to(*args, **kwargs) return self def cuda(self, device=None): super().cuda(device) return self def cpu(self): super().cpu() return self def forward(self, batch): values = self.encode(batch, training=False) values = self.decode(values, training=False) return values def _unwrap_list(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return shapecode, texcode, expcode, posecode, cam, lightcode def _unwrap_list_to_dict(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return {'shape': shapecode, 'tex': texcode, 'exp': expcode, 'pose': posecode, 'cam': cam, 'light': lightcode} # return shapecode, texcode, expcode, posecode, cam, lightcode def _encode_flame(self, images, **kwargs): if self.mode == DecaMode.COARSE or \ (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # forward pass with gradients (for coarse stage (used), or detail stage with coarse training (not used)) parameters = self.deca._encode_flame(images, **kwargs) elif self.mode == DecaMode.DETAIL: # in detail stage, the coarse forward pass does not need gradients with torch.no_grad(): parameters = self.deca._encode_flame(images, **kwargs) else: raise ValueError(f"Invalid EMOCA Mode {self.mode}") code_list, original_code = self.deca.decompose_code(parameters) # shapecode, texcode, expcode, posecode, cam, lightcode = code_list # return shapecode, texcode, expcode, posecode, cam, lightcode, original_code return code_list, original_code def _expression_ring_exchange(self, original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode=None, detailemocode=None, exprw=None, lmk_mp=None, mica_images=None): """ Deprecated. Expression ring exchange is not used in EMOCA (nor DECA). 
""" new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() expcode_new = expcode[new_order] ## append new shape code data expcode = torch.cat([expcode, expcode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) shapecode = torch.cat([shapecode, shapecode], dim=0) globpose = posecode[..., :3] jawpose = posecode[..., 3:] if self.deca.config.expression_constrain_use_jaw_pose: jawpose_new = jawpose[new_order] jawpose = torch.cat([jawpose, jawpose_new], dim=0) else: jawpose = torch.cat([jawpose, jawpose], dim=0) if self.deca.config.expression_constrain_use_global_pose: globpose_new = globpose[new_order] globpose = torch.cat([globpose, globpose_new], dim=0) else: globpose = torch.cat([globpose, globpose], dim=0) if self.deca.config.expression_constrain_use_jaw_pose or self.deca.config.expression_constrain_use_global_pose: posecode = torch.cat([globpose, jawpose], dim=-1) # posecode_new = torch.cat([globpose, jawpose], dim=-1) else: # posecode_new = posecode # posecode_new = posecode posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) # NOTE: # Here we could think about what makes sense to exchange # 1) Do we exchange all emotion GT (VA and expression) within the ring? # 2) Do we exchange only the GT on which the ring is constructed (AffectNet ring based on binned VA or expression or Emonet feature?) # note: if we use EmoMLP that goes from (expression, jawpose, detailcode) -> (v,a,expr) and we exchange # ALL of these, the EmoMLP prediction will of course be the same. The output image still changes, # so EmoNet loss (if used) would be different. Same for the photometric/landmark losses. # TODO: # For now I decided to exchange everything but this should probably be experimented with # I would argue though, that exchanging the GT is the right thing to do if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) if affectnetexp is not None: affectnetexp = torch.cat([affectnetexp, affectnetexp[new_order]], dim=0) if exprw is not None: exprw = torch.cat([exprw, exprw[new_order]], dim=0) if detailcode is not None: #TODO: to exchange or not to exchange, that is the question, the answer is probably NO detailcode = torch.cat([detailcode, detailcode], dim=0) # detailcode = torch.cat([detailcode, detailcode[new_order]], dim=0) if detailemocode is not None: # TODO: to exchange or not to exchange, that is the question, the answer is probably YES detailemocode = torch.cat([detailemocode, detailemocode[new_order]], dim=0) return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, \ detailcode, detailemocode, exprw, lmk_mp, mica_images # return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7 def encode(self, batch, training=True) -> dict: """ Forward encoding pass of the model. Takes a batch of images and returns the corresponding latent codes for each image. 
:param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. For a testing pass, the images suffice. :param training: Whether the forward pass is for training or testing. """ codedict = {} original_batch_size = batch['image'].shape[0] images = batch['image'] if 'mica_images' in batch.keys(): mica_images = batch['mica_images'] else: mica_images = None if len(images.shape) == 5: K = images.shape[1] elif len(images.shape) == 4: K = 1 else: raise RuntimeError("Invalid image batch dimensions.") # [B, K, 3, size, size] ==> [BxK, 3, size, size] images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = mica_images.view(-1, mica_images.shape[-3], mica_images.shape[-2], mica_images.shape[-1]) if 'landmark' in batch.keys(): lmk = batch['landmark'] lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if 'landmark_mediapipe' in batch.keys(): lmk_mp = batch['landmark_mediapipe'] lmk_mp = lmk_mp.view(-1, lmk_mp.shape[-2], lmk_mp.shape[-1]) else: lmk_mp = None if 'mask' in batch.keys(): masks = batch['mask'] masks = masks.view(-1, images.shape[-2], images.shape[-1]) # valence / arousal - not necessary unless we want to use VA for supervision (not done in EMOCA) if 'va' in batch: va = batch['va'] va = va.view(-1, va.shape[-1]) else: va = None # 7 basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'expr7' in batch: expr7 = batch['expr7'] expr7 = expr7.view(-1, expr7.shape[-1]) else: expr7 = None # affectnet basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'affectnetexp' in batch: affectnetexp = batch['affectnetexp'] affectnetexp = affectnetexp.view(-1, affectnetexp.shape[-1]) else: affectnetexp = None # expression weights if supervising by expression is used (to balance the classification loss) - not done in EMOCA or DECA if 'expression_weight' in batch: exprw = batch['expression_weight'] exprw = exprw.view(-1, exprw.shape[-1]) else: exprw = None # 1) COARSE STAGE # forward pass of the coarse encoder # shapecode, texcode, expcode, posecode, cam, lightcode = self._encode_flame(images) code, original_code = self._encode_flame(images, mica_image=mica_images) shapecode, texcode, expcode, posecode, cam, lightcode = self._unwrap_list(code) if original_code is not None: original_code = self._unwrap_list_to_dict(original_code) if training: # If training, we employ the disentanglement strategy if self.mode == DecaMode.COARSE: if self.deca.config.shape_constrain_type == 'same': ## Enforce that all identity shape codes within ring are the same. The batch is duplicated ## and the duplicated part's shape codes are shuffled. 
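                    # NOTE (illustrative sketch, not part of the original code): with toy sizes,
                    # the 'same' constraint implemented below averages the shape code over the K
                    # ring members of each identity and broadcasts the mean back to every member:
                    #
                    #   >>> import torch
                    #   >>> B, K, n = 2, 3, 4
                    #   >>> shapecode = torch.randn(B * K, n)
                    #   >>> mean = shapecode.view(B, K, n).mean(dim=1)          # one code per identity
                    #   >>> shapecode = mean[:, None, :].repeat(1, K, 1).view(B * K, n)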
# reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) shapecode_idK = shapecode.view(original_batch_size, K, -1) # get mean id shapecode_mean = torch.mean(shapecode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) shapecode_new = shapecode_mean[:, None, :].repeat(1, K, 1) shapecode = shapecode_new.view(-1, self.deca._get_num_shape_params()) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_idK = shapecode_orig.view(original_batch_size, K, -1) shapecode_orig_mean = torch.mean(shapecode_orig_idK, dim=[1]) shapecode_orig_new = shapecode_orig_mean[:, None, :].repeat(1, K, 1) original_code['shape'] = shapecode_orig_new.view(-1, self.deca._get_num_shape_params()) elif self.deca.config.shape_constrain_type == 'exchange': ## Shuffle identitys shape codes within ring (they should correspond to the same identity) ''' make sure s0, s1 is something to make shape close the difference from ||so - s1|| is the later encourage s0, s1 is cloase in l2 space, but not really ensure shape will be close ''' # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(self.deca.config.batch_size_train)]) # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_expression': assert original_code is not None ## DEPRECATED, NOT USED IN EMOCA OR DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression 
expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) # do the same for the original code dict original_code['shape'] = torch.cat([original_code['shape'], original_code['shape']], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp'][new_order]], dim=0) original_global_pose = original_code['pose'][:, :3] original_jaw_pose = original_code['pose'][:, 3:] original_jaw_pose = torch.cat([original_jaw_pose, original_jaw_pose[new_order]], dim=0) original_global_pose = torch.cat([original_global_pose, original_global_pose], dim=0) original_code['pose'] = torch.cat([original_global_pose, original_jaw_pose], dim=1) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_shape': ## The shape codes are shuffled without duplication new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) 
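                    # NOTE (illustrative sketch, not part of the original code): the reference
                    # index bookkeeping around here records, for each of the 2*B*K rendered
                    # images, which input image supplies its identity and which its expression.
                    # With `old_order` the identity mapping [0..N-1] and `new_order` a permutation
                    # with no fixed points (enforced by the while-loop above), the duplicated half
                    # takes its identity from the shuffled indices:
                    #
                    #   >>> import numpy as np
                    #   >>> old_order = np.arange(4)
                    #   >>> new_order = np.array([1, 0, 3, 2])        # toy fixed-point-free permutation
                    #   >>> np.concatenate([old_order, new_order])    # identity references
                    #   array([0, 1, 2, 3, 1, 0, 3, 2])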
ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) original_code['ref_images_identity_idxs'] = ref_images_identity_idxs original_code['ref_images_expression_idxs'] = ref_images_expression_idxs elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'same': ## NOT USED IN EMOCA OR DECA, deprecated # reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) expcode_idK = expcode.view(original_batch_size, K, -1) # get mean id expcode_mean = torch.mean(expcode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) expcode = expcode_new.view(-1, self.deca._get_num_shape_params()) # do the same thing for the original code dict expcode_idK = original_code['exp'].view(original_batch_size, K, -1) expcode_mean = torch.mean(expcode_idK, dim=[1]) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) original_code['exp'] = expcode_new.view(-1, self.deca._get_num_shape_params()) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': ## NOT USED IN EMOCA OR DECA, deprecated expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, \ masks, va, expr7, affectnetexp, _, _, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, None, None, exprw, lmk_mp, mica_images) # (self, original_batch_size, K, # expcode, posecode, shapecode, lightcode, texcode, # images, cam, lmk, masks, va, expr7, affectnetexp, # detailcode=None, detailemocode=None, exprw=None): # 2) DETAIL STAGE if self.mode == DecaMode.DETAIL: all_detailcode = self.deca.E_detail(images) # identity-based detail code detailcode = all_detailcode[:, :self.deca.n_detail] # detail emotion code is deprecated and will be empty detailemocode = all_detailcode[:, self.deca.n_detail:(self.deca.n_detail + self.deca.n_detail_emo)] if training: # If training, we employ the disentanglement strategy if self.deca.config.detail_constrain_type == 'exchange': # Identity within the same ring should be the same, so they should have the same code. # This can be enforced by shuffling. The batch is duplicated and the duplicated part's code shuffled ''' make sure s0, s1 is something to make shape close the difference from ||so - s1|| is the later encourage s0, s1 is cloase in l2 space, but not really ensure shape will be close ''' # this creates a per-ring random permutation. 
The detail exchange happens ONLY between the same # identities (within the ring) but not outside (no cross-identity detail exchange) new_order = np.array( # [np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) [np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) ## append new shape code data shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_expression': ## Deprecated and not used in EMOCA or DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange emotion code, but not (identity-based) detailcode detailemocode_new = detailemocode[new_order] detailemocode = torch.cat([detailemocode, detailemocode_new], dim=0) detailcode = torch.cat([detailcode, detailcode], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_shape': ## Shuffles teh shape code without duplicating the batch new_order = 
np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) # exchange (identity-based) detailcode, but not emotion code detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images) codedict['shapecode'] = shapecode codedict['texcode'] = texcode codedict['expcode'] = expcode codedict['posecode'] = posecode codedict['cam'] = cam codedict['lightcode'] = lightcode if self.mode == DecaMode.DETAIL: codedict['detailcode'] = detailcode codedict['detailemocode'] = detailemocode codedict['images'] = images if mica_images is not None: codedict['mica_images'] = mica_images if 'mask' in batch.keys(): codedict['masks'] = masks if 'landmark' in batch.keys(): codedict['lmk'] = lmk if lmk_mp is not None: codedict['lmk_mp'] = lmk_mp if 'va' in batch.keys(): codedict['va'] = va if 'expr7' in batch.keys(): codedict['expr7'] = expr7 if 'affectnetexp' in batch.keys(): codedict['affectnetexp'] = affectnetexp if 'expression_weight' in batch.keys(): codedict['expression_weight'] = exprw if original_code is not None: codedict['original_code'] = original_code return codedict def _create_conditioning_lists(self, codedict, condition_list): detail_conditioning_list = [] if 'globalpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, :3]] if 'jawpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, 3:]] if 'identity' in condition_list: detail_conditioning_list += [codedict["shapecode"]] if 'expression' in condition_list: detail_conditioning_list += [codedict["expcode"]] if isinstance(self.deca.D_detail, Generator): # the detail codes might be excluded from conditioning based on the 
Generator architecture (for instance # for AdaIn Generator) if 'detail' in condition_list: detail_conditioning_list += [codedict["detailcode"]] if 'detailemo' in condition_list: detail_conditioning_list += [codedict["detailemocode"]] return detail_conditioning_list def decode(self, codedict, training=True, render=True, **kwargs) -> dict: """ Forward decoding pass of the model. Takes the latent code predicted by the encoding stage and reconstructs and renders the shape. :param codedict: Batch dict of the predicted latent codes :param training: Whether the forward pass is for training or testing. """ shapecode = codedict['shapecode'] expcode = codedict['expcode'] posecode = codedict['posecode'] texcode = codedict['texcode'] cam = codedict['cam'] lightcode = codedict['lightcode'] images = codedict['images'] if 'masks' in codedict.keys(): masks = codedict['masks'] else: masks = None effective_batch_size = images.shape[0] # this is the current batch size after all training augmentations modifications # 1) Reconstruct the face mesh # FLAME - world space if not isinstance(self.deca.flame, FLAME_mediapipe): verts, landmarks2d, landmarks3d = self.deca.flame(shape_params=shapecode, expression_params=expcode, pose_params=posecode) landmarks2d_mediapipe = None else: verts, landmarks2d, landmarks3d, landmarks2d_mediapipe = self.deca.flame(shapecode, expcode, posecode) # world to camera trans_verts = util.batch_orth_proj(verts, cam) predicted_landmarks = util.batch_orth_proj(landmarks2d, cam)[:, :, :2] # camera to image space trans_verts[:, :, 1:] = -trans_verts[:, :, 1:] predicted_landmarks[:, :, 1:] = - predicted_landmarks[:, :, 1:] if landmarks2d_mediapipe is not None: predicted_landmarks_mediapipe = util.batch_orth_proj(landmarks2d_mediapipe, cam)[:, :, :2] predicted_landmarks_mediapipe[:, :, 1:] = - predicted_landmarks_mediapipe[:, :, 1:] if self.uses_texture(): albedo = self.deca.flametex(texcode) else: # if not using texture, default to gray albedo = torch.ones([effective_batch_size, 3, self.deca.config.uv_size, self.deca.config.uv_size], device=images.device) * 0.5 # 2) Render the coarse image if render: ops = self.deca.render(verts, trans_verts, albedo, lightcode) # mask mask_face_eye = F.grid_sample(self.deca.uv_face_eye_mask.expand(effective_batch_size, -1, -1, -1), ops['grid'].detach(), align_corners=False) # images predicted_images = ops['images'] # predicted_images = ops['images'] * mask_face_eye * ops['alpha_images'] # predicted_images_no_mask = ops['images'] #* mask_face_eye * ops['alpha_images'] segmentation_type = None if isinstance(self.deca.config.useSeg, bool): if self.deca.config.useSeg: segmentation_type = 'gt' else: segmentation_type = 'rend' elif isinstance(self.deca.config.useSeg, str): segmentation_type = self.deca.config.useSeg else: raise RuntimeError(f"Invalid 'useSeg' type: '{type(self.deca.config.useSeg)}'") if segmentation_type not in ["gt", "rend", "intersection", "union"]: raise ValueError(f"Invalid segmentation type for masking '{segmentation_type}'") if masks is None: # if mask not provided, the only mask available is the rendered one segmentation_type = 'rend' elif masks.shape[-1] != predicted_images.shape[-1] or masks.shape[-2] != predicted_images.shape[-2]: # resize masks if need be (this is only done if configuration was changed at some point after training) dims = masks.ndim == 3 if dims: masks = masks[:, None, :, :] masks = F.interpolate(masks, size=predicted_images.shape[-2:], mode='bilinear') if dims: masks = masks[:, 0, ...] 
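            # NOTE (illustrative sketch, not part of the original code): the mask resizing above
            # relies on F.interpolate expecting a channel dimension, hence the temporary
            # unsqueeze for 3D masks:
            #
            #   >>> import torch
            #   >>> import torch.nn.functional as F
            #   >>> m = torch.rand(8, 256, 256)                               # (B, H, W) mask
            #   >>> m = F.interpolate(m[:, None], size=(224, 224), mode='bilinear')[:, 0]
            #   >>> tuple(m.shape)
            #   (8, 224, 224)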
# resize images if need be (this is only done if configuration was changed at some point after training) if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') else: images_resized = images # what type of segmentation we use if segmentation_type == "gt": # GT stands for external segmetnation predicted by face parsing or similar masks = masks[:, None, :, :] elif segmentation_type == "rend": # mask rendered as a silhouette of the face mesh masks = mask_face_eye * ops['alpha_images'] elif segmentation_type == "intersection": # intersection of the two above masks = masks[:, None, :, :] * mask_face_eye * ops['alpha_images'] elif segmentation_type == "union": # union of the first two options masks = torch.max(masks[:, None, :, :], mask_face_eye * ops['alpha_images']) else: raise RuntimeError(f"Invalid segmentation type for masking '{segmentation_type}'") if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed predicted_images = (1. - masks) * images_resized + masks * predicted_images else: predicted_images = (1. - masks) * images + masks * predicted_images elif self.deca.config.background_from_input in [False, "black"]: predicted_images = masks * predicted_images elif self.deca.config.background_from_input in ["none"]: predicted_images = predicted_images else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # 3) Render the detail image if self.mode == DecaMode.DETAIL: detailcode = codedict['detailcode'] detailemocode = codedict['detailemocode'] # a) Create the detail conditioning lists detail_conditioning_list = self._create_conditioning_lists(codedict, self.detail_conditioning) detailemo_conditioning_list = self._create_conditioning_lists(codedict, self.detailemo_conditioning) final_detail_conditioning_list = detail_conditioning_list + detailemo_conditioning_list # b) Pass the detail code and the conditions through the detail generator to get displacement UV map if isinstance(self.deca.D_detail, Generator): uv_z = self.deca.D_detail(torch.cat(final_detail_conditioning_list, dim=1)) elif isinstance(self.deca.D_detail, GeneratorAdaIn): uv_z = self.deca.D_detail(z=torch.cat([detailcode, detailemocode], dim=1), cond=torch.cat(final_detail_conditioning_list, dim=1)) else: raise ValueError(f"This class of generarator is not supported: '{self.deca.D_detail.__class__.__name__}'") # if there is a displacement mask, apply it (DEPRECATED and not USED in DECA or EMOCA) if hasattr(self.deca, 'displacement_mask') and self.deca.displacement_mask is not None: if 'apply_displacement_masks' in self.deca.config.keys() and self.deca.config.apply_displacement_masks: uv_z = uv_z * self.deca.displacement_mask # uv_z = self.deca.D_detail(torch.cat([posecode[:, 3:], expcode, detailcode], dim=1)) # render detail if render: detach_from_coarse_geometry = not self.deca.config.train_coarse uv_detail_normals, uv_coarse_vertices = self.deca.displacement2normal(uv_z, verts, ops['normals'], detach=detach_from_coarse_geometry) uv_shading = self.deca.render.add_SHlight(uv_detail_normals, lightcode.detach()) uv_texture = albedo.detach() * uv_shading # batch size X 
image_rows X image_cols X 2 # you can query the grid for UV values of the face mesh at pixel locations grid = ops['grid'] if detach_from_coarse_geometry: # if the grid is detached, the gradient of the positions of UV-values in image space won't flow back to the geometry grid = grid.detach() predicted_detailed_image = F.grid_sample(uv_texture, grid, align_corners=False) if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed # images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') ## before bugfix # predicted_images = (1. - masks) * images_resized + masks * predicted_images ## after bugfix predicted_detailed_image = (1. - masks) * images_resized + masks * predicted_detailed_image else: predicted_detailed_image = (1. - masks) * images + masks * predicted_detailed_image elif self.deca.config.background_from_input in [False, "black"]: predicted_detailed_image = masks * predicted_detailed_image elif self.deca.config.background_from_input in ["none"]: predicted_detailed_image = predicted_detailed_image else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # --- extract texture uv_pverts = self.deca.render.world2uv(trans_verts).detach() uv_gt = F.grid_sample(torch.cat([images_resized, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') uv_texture_gt = uv_gt[:, :3, :, :].detach() uv_mask_gt = uv_gt[:, 3:, :, :].detach() # self-occlusion normals = util.vertex_normals(trans_verts, self.deca.render.faces.expand(effective_batch_size, -1, -1)) uv_pnorm = self.deca.render.world2uv(normals) uv_mask = (uv_pnorm[:, -1, :, :] < -0.05).float().detach() uv_mask = uv_mask[:, None, :, :] ## combine masks uv_vis_mask = uv_mask_gt * uv_mask * self.deca.uv_face_eye_mask else: uv_detail_normals = None predicted_detailed_image = None ## 4) (Optional) NEURAL RENDERING - not used in neither DECA nor EMOCA # If neural rendering is enabled, the differentiable rendered synthetic images are translated using an image translation net (such as StarGan) predicted_translated_image = None predicted_detailed_translated_image = None translated_uv_texture = None if render: if self.deca._has_neural_rendering(): predicted_translated_image = self.deca.image_translator( { "input_image" : predicted_images, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_images.shape[0], dtype=torch.int64, device=predicted_images.device) } ) if self.mode == DecaMode.DETAIL: predicted_detailed_translated_image = self.deca.image_translator( { "input_image" : predicted_detailed_image, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_detailed_image.shape[0], dtype=torch.int64, device=predicted_detailed_image.device) } ) translated_uv = F.grid_sample(torch.cat([predicted_detailed_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') translated_uv_texture = translated_uv[:, :3, :, :].detach() else: predicted_detailed_translated_image = None translated_uv_texture = None # no need in coarse mode # translated_uv = F.grid_sample(torch.cat([predicted_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], # mode='bilinear') # translated_uv_texture = translated_uv_gt[:, :3, :, :].detach() if self.emotion_mlp is not None: codedict = self.emotion_mlp(codedict, 
"emo_mlp_") # populate the value dict for metric computation/visualization if render: codedict['predicted_images'] = predicted_images codedict['predicted_detailed_image'] = predicted_detailed_image codedict['predicted_translated_image'] = predicted_translated_image codedict['ops'] = ops codedict['normals'] = ops['normals'] codedict['mask_face_eye'] = mask_face_eye codedict['verts'] = verts codedict['albedo'] = albedo codedict['landmarks2d'] = landmarks2d codedict['landmarks3d'] = landmarks3d codedict['predicted_landmarks'] = predicted_landmarks if landmarks2d_mediapipe is not None: codedict['predicted_landmarks_mediapipe'] = predicted_landmarks_mediapipe codedict['trans_verts'] = trans_verts codedict['masks'] = masks if self.mode == DecaMode.DETAIL: if render: codedict['predicted_detailed_translated_image'] = predicted_detailed_translated_image codedict['translated_uv_texture'] = translated_uv_texture codedict['uv_texture_gt'] = uv_texture_gt codedict['uv_texture'] = uv_texture codedict['uv_detail_normals'] = uv_detail_normals codedict['uv_shading'] = uv_shading codedict['uv_vis_mask'] = uv_vis_mask codedict['uv_mask'] = uv_mask codedict['uv_z'] = uv_z codedict['displacement_map'] = uv_z + self.deca.fixed_uv_dis[None, None, :, :] return codedict def _compute_emotion_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, va=None, expr7=None, with_grad=True, batch_size=None, ring_size=None): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) else: d = metric_dict with torch.no_grad(): emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) # EmoNet self-consistency loss terms if emo_feat_loss_1 is not None: loss_or_metric(prefix + '_emonet_feat_1_L1', emo_feat_loss_1 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_1 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_feat_2_L1', emo_feat_loss_2 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_2 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_valence_L1', valence_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_valence and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_arousal_L1', arousal_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_arousal and self.deca.config.use_emonet_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.emonet_weight) # KL seems to be causing NaN's loss_or_metric(prefix + '_emonet_expression_L1',expression_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_expression and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_combined', ((emo_feat_loss_1 if emo_feat_loss_1 is not None else 0) + emo_feat_loss_2 + valence_loss + arousal_loss + expression_loss) * self.deca.config.emonet_weight, self.deca.config.use_emonet_combined and self.deca.config.use_emonet_loss) # Log also the VA metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() input_ex = np.argmax(input_ex, axis=1).mean() output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() output_ex = np.argmax(output_ex, axis=1).mean() metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict # TODO: uncomment this after you handle the case when certain entries are NaN (GT missing, not a bug) # if va is not None: # d[prefix + 'emo_sup_val_L1'] = F.l1_loss(self.emonet_loss.output_emotion['valence'], va[:, 0]) \ # * self.deca.config.gt_emotion_reg # d[prefix + 'emo_sup_ar_L1'] = F.l1_loss(self.emonet_loss.output_emotion['arousal'], va[:, 1]) \ # * self.deca.config.gt_emotion_reg # # metric_dict[prefix + "_valence_gt"] = va[:, 0].mean().detach() # metric_dict[prefix + "_arousal_gt"] = va[:, 1].mean().detach() # # if expr7 is not None: # affectnet_gt = [expr7_to_affect_net(int(expr7[i])).value for i in range(len(expr7))] # affectnet_gt = torch.tensor(np.array(affectnet_gt), device=self.device, dtype=torch.long) # d[prefix + '_emo_sup_expr_CE'] = F.cross_entropy(self.emonet_loss.output_emotion['expression'], affectnet_gt) * self.deca.config.gt_emotion_reg # metric_dict[prefix + "_expr_gt"] = affectnet_gt.mean().detach() def _compute_au_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, au=None, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) else: d = metric_dict with torch.no_grad(): au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) # EmoNet self-consistency loss terms if au_feat_loss_1 is not None: loss_or_metric(prefix + '_au_feat_1_L1', au_feat_loss_1 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_1 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_feat_2_L1', au_feat_loss_2 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_2 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_loss', au_loss * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_aus and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + '_au_losses_L1', arousal_loss * self.deca.config.au_loss.au_weight, # self.deca.config.au_loss.use_emonet_arousal and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.au_loss.au_weight) # KL seems to be causing NaN's # # Log also the VA # metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() # metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() # metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() # metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() # input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() # input_ex = np.argmax(input_ex, axis=1).mean() # output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() # output_ex = np.argmax(output_ex, axis=1).mean() # metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) # metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict def _cut_mouth_vectorized(self, images, landmarks, convert_grayscale=True): # mouth_window_margin = 12 mouth_window_margin = 1 # not temporal mouth_crop_height = 96 mouth_crop_width = 96 mouth_landmark_start_idx = 48 mouth_landmark_stop_idx = 68 B, T = images.shape[:2] landmarks = landmarks.to(torch.float32) with torch.no_grad(): image_size = images.shape[-1] / 2 landmarks = landmarks * image_size + image_size # #1) smooth the landmarks with temporal convolution # landmarks are of shape (T, 68, 2) # reshape to (T, 136) landmarks_t = landmarks.reshape(*landmarks.shape[:2], -1) # make temporal dimension last landmarks_t = landmarks_t.permute(0, 2, 1) # change chape to (N, 136, T) # landmarks_t = landmarks_t.unsqueeze(0) # smooth with temporal convolution temporal_filter = torch.ones(mouth_window_margin, device=images.device) / mouth_window_margin # pad the the landmarks landmarks_t_padded = F.pad(landmarks_t, (mouth_window_margin // 2, mouth_window_margin // 2), mode='replicate') # convolve each channel separately with the temporal filter num_channels = landmarks_t.shape[1] if temporal_filter.numel() > 1: smooth_landmarks_t = F.conv1d(landmarks_t_padded, temporal_filter.unsqueeze(0).unsqueeze(0).expand(num_channels,1,temporal_filter.numel()), groups=num_channels, padding='valid' ) smooth_landmarks_t = smooth_landmarks_t[..., 0:landmarks_t.shape[-1]] else: smooth_landmarks_t = landmarks_t # reshape back to the original shape smooth_landmarks_t = smooth_landmarks_t.permute(0, 2, 1).view(landmarks.shape) smooth_landmarks_t = smooth_landmarks_t + landmarks.mean(dim=2, keepdims=True) - smooth_landmarks_t.mean(dim=2, keepdims=True) # #2) get the mouth landmarks mouth_landmarks_t = smooth_landmarks_t[..., mouth_landmark_start_idx:mouth_landmark_stop_idx, :] # #3) get the mean of the mouth landmarks mouth_landmarks_mean_t = mouth_landmarks_t.mean(dim=-2, keepdims=True) # #4) get the center of the mouth center_x_t = mouth_landmarks_mean_t[..., 0] center_y_t = mouth_landmarks_mean_t[..., 1] # #5) use grid_sample to crop the mouth in every image # create the grid height = mouth_crop_height//2 width = mouth_crop_width//2 torch.arange(0, mouth_crop_width, device=images.device) grid = torch.stack(torch.meshgrid(torch.linspace(-height, height, mouth_crop_height).to(images.device) / (images.shape[-2] /2), torch.linspace(-width, width, mouth_crop_width).to(images.device) / (images.shape[-1] /2) ), dim=-1) grid = grid[..., [1, 0]] grid = grid.unsqueeze(0).unsqueeze(0).repeat(*images.shape[:2], 1, 1, 1) center_x_t -= images.shape[-1] / 2 
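            # NOTE (illustrative sketch, not part of the original code): grid_sample expects
            # sampling locations normalized to [-1, 1] over the input width/height, which is why
            # the pixel-space mouth centers are shifted and rescaled here. For a single coordinate:
            #
            #   >>> W = 224
            #   >>> x_pix = 168.0                      # mouth center x in pixels
            #   >>> (x_pix - W / 2) / (W / 2)          # normalized grid coordinate
            #   0.5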
center_y_t -= images.shape[-2] / 2 center_x_t /= images.shape[-1] / 2 center_y_t /= images.shape[-2] / 2 grid = grid + torch.cat([center_x_t, center_y_t ], dim=-1).unsqueeze(-2).unsqueeze(-2) images = images.view(B*T, *images.shape[2:]) grid = grid.view(B*T, *grid.shape[2:]) if convert_grayscale: images = F_v.rgb_to_grayscale(images) image_crops = F.grid_sample( images, grid, align_corners=True, padding_mode='zeros', mode='bicubic' ) image_crops = image_crops.view(B, T, *image_crops.shape[1:]) if convert_grayscale: image_crops = image_crops#.squeeze(1) # import matplotlib.pyplot as plt # plt.figure() # plt.imshow(image_crops[0, 0].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[0, 10].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[0, 20].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 0].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 10].permute(1,2,0).cpu().numpy()) # plt.show() # plt.figure() # plt.imshow(image_crops[1, 20].permute(1,2,0).cpu().numpy()) # plt.show() return image_crops def _compute_lipread_loss(self, images, predicted_images, landmarks, predicted_landmarks, loss_dict, metric_dict, prefix, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # shape of images is: (B, R, C, H, W) # convert to (B * R, 1, H, W, C) images = images.unsqueeze(1) predicted_images = predicted_images.unsqueeze(1) landmarks = landmarks.unsqueeze(1) predicted_landmarks = predicted_landmarks.unsqueeze(1) # cut out the mouth region images_mouth = self._cut_mouth_vectorized(images, landmarks) predicted_images_mouth = self._cut_mouth_vectorized(predicted_images, predicted_landmarks) # make sure that the lip reading net interprests things with depth=1, # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth) else: d = metric_dict with torch.no_grad(): loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth) d[prefix + '_lipread'] = loss * self.deca.config.lipread_loss.weight def _metric_or_loss(self, loss_dict, metric_dict, is_loss): if is_loss: d = loss_dict else: d = metric_dict return d def _compute_id_loss(self, codedict, batch, training, testing, losses, batch_size, ring_size): # if self.deca.config.idw > 1e-3: if self.deca.id_loss is not None: images = codedict["images"] ops = codedict["ops"] mask_face_eye = codedict["mask_face_eye"] shading_images = self.deca.render.add_SHlight(ops['normal_images'], codedict["lightcode"].detach()) albedo_images = F.grid_sample(codedict["albedo"].detach(), ops['grid'], align_corners=False) # TODO: get to the bottom of this weird overlay thing - why is it there? 
# answer: This renders the face and takes background from the image overlay = albedo_images * shading_images * mask_face_eye + images * (1 - mask_face_eye) if self.global_step >= self.deca.id_loss_start_step: if 'id_metric' in self.deca.config.keys() and 'barlow_twins' in self.deca.config.id_metric: assert ring_size == 1 or ring_size == 2 effective_bs = images.shape[0] # losses['identity'] = self.deca.id_loss(overlay, images, batch_size=batch_size, # ring_size=ring_size) * self.deca.config.idw if "ref_images_identity_idxs" in codedict.keys(): # in case there was shuffling, this ensures that the proper images are used for identity loss images_ = images[codedict["ref_images_identity_idxs"]] else: images_ = images losses['identity'] = self.deca.id_loss(overlay, images_, batch_size=effective_bs, ring_size=1) * self.deca.config.idw if 'id_contrastive' in self.deca.config.keys() and bool(self.deca.config.id_contrastive): if ring_size == 2: assert effective_bs % 2 == 0 assert self.deca.id_loss.trainable has_been_shuffled = 'new_order' in codedict.keys() idxs_a = torch.arange(0, images.shape[0], 2) # indices of first images within the ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second images within the ring # WARNING - this assumes the ring is identity-based if self.deca.config.id_contrastive in [True, "real", "both"]: # we are taking this from the original batch dict because we do not care about the # shuffled, duplicated samples (they don't have to be doubled) images_0 = batch["image"][:, 0, ...] images_1 = batch["image"][:, 1, ...] losses['identity_contrastive_real'] = self.deca.id_loss( images_0, # first images within the ring images_1, # second images within the ring batch_size=images_0.shape[0], ring_size=1) * self.deca.config.idw * 2 if self.deca.config.id_contrastive in [True, "synth", "both"]: if self.deca.config.shape_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings overlay_0 = overlay[idxs_a] overlay_1 = overlay[idxs_b] else: #if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: overlay_0 = overlay[0:batch_size * ring_size:2] overlay_1 = overlay[1:batch_size * ring_size:2] losses['identity_contrastive_synthetic'] = self.deca.id_loss( overlay_0, # first images within the ring overlay_1, # second images within the ring batch_size=overlay_0.shape[0], ring_size=1) * self.deca.config.idw if has_been_shuffled: new_order = codedict['new_order'] # TODO: compare the idxs to these: # codedict["ref_images_identity_idxs"] if self.deca.config.shape_constrain_type == 'shuffle_expression': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch else: raise NotImplementedError("Unexpected shape consistency value ") # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["shapecode"][idxs_a_synth].allclose(codedict["shapecode"][idxs_b_synth]) losses['identity_contrastive_synthetic_shuffled'] = self.deca.id_loss( overlay[idxs_a_synth], # synthetic images of identities with reconstructed expressions overlay[idxs_b_synth], # synthetic images of identities with shuffled expressions 
    def _compute_emonet_loss_wrapper(self, codedict, batch, training, testing, losses, metrics, prefix, image_key,
                                     with_grad, batch_size, ring_size):

        if self.emonet_loss is not None:
            if 'va' in codedict:
                va = codedict['va']
                va = va.view(-1, va.shape[-1])
            else:
                va = None

            if 'expr7' in codedict:
                expr7 = codedict['expr7']
                expr7 = expr7.view(-1, expr7.shape[-1])
            else:
                expr7 = None

            # with torch.no_grad():
            # TODO: if expression shuffled, this needs to be changed, the input images no longer correspond
            images = codedict["images"]
            predicted_images = codedict[image_key]
            effective_bs = images.shape[0]

            if "ref_images_expression_idxs" in codedict.keys():
                # in case there was shuffling, this ensures that the proper images are used for emotion loss
                images_ = images[codedict["ref_images_expression_idxs"]]
            else:
                images_ = images

            self._compute_emotion_loss(images_, predicted_images, losses, metrics, f"{prefix}",
                                       va, expr7, with_grad=with_grad,
                                       batch_size=effective_bs, ring_size=1)

            codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence']
            codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal']
            codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']
            codedict[f"{prefix}_valence_output"] = self.emonet_loss.output_emotion['valence']
            codedict[f"{prefix}_arousal_output"] = self.emonet_loss.output_emotion['arousal']
            codedict[f"{prefix}_expression_output"] = self.emonet_loss.output_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']

            if 'emo_contrastive' in self.deca.config.keys() and self.deca.config.emo_contrastive:
                assert ring_size == 2 or ring_size == 1
                assert self.emonet_loss.trainable or (
                        hasattr(self.emonet_loss, 'clone_is_trainable') and self.emonet_loss.clone_is_trainable)

                has_been_shuffled = 'new_order' in codedict.keys()

                # if self.deca.config.shape_constrain_type == 'shuffle_expression' and has_been_shuffled:
                #     new_order = codedict['new_order']

                # if self.deca.config.emo_contrastive in [True, "real", "both"]:
                if ring_size == 2:
                    assert effective_bs % 2 == 0

                    if not isinstance(self.deca, ExpDECA):
                        raise NotImplementedError("Cross-ring emotion contrast means the ring has to be "
                                                  "expression based, not identity based. This is not guaranteed "
                                                  "for vanilla EMOCA (or its datasets).")

                    # we are taking this from the original batch dict because we do not care about the
                    # shuffled, duplicated samples (they don't have to be doubled)
                    images_0 = batch["image"][:, 0, ...]
                    images_1 = batch["image"][:, 1, ...]
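                    # Editor's sketch: the _compute_emotion_loss call below compares the
                    # emotion-recognition outputs of the two ring images, conceptually:
                    #
                    #     out_0 = emo_net(images_0)  # hypothetical net returning {'valence', 'arousal', 'features'}
                    #     out_1 = emo_net(images_1)
                    #     loss = F.mse_loss(out_0['valence'], out_1['valence']) \
                    #            + F.mse_loss(out_0['arousal'], out_1['arousal']) \
                    #            + (1 - F.cosine_similarity(out_0['features'], out_1['features'], dim=-1)).mean()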
self._compute_emotion_loss(images_0, # real images of first expressions in the ring images_1, # real images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_real", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=images_0.shape[0], ring_size=1) else: print("[WARNING] Cannot compute real contrastive emotion loss because there is no ring!") if self.deca.config.emo_contrastive in [True, "synth", "both"]: if ring_size == 2: assert effective_bs % 2 == 0 idxs_a = torch.arange(0, images.shape[0], 2) # indices of first expressions within a ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second expressions within a ring if 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings predicted_images_0 = predicted_images[idxs_a] predicted_images_1 = predicted_images[idxs_b] raise RuntimeError("This should work but it was never tested or intended. Make sure this works.") else: # if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: predicted_images_0 = predicted_images[0:batch_size * ring_size:2] predicted_images_1 = predicted_images[1:batch_size * ring_size:2] if not isinstance(self.deca, ExpDECA): raise NotImplementedError("Cross-ring emotion contrast means the ring has to be " "expression based, not identity based. This is not guaranteed " "for vanilla EMOCA.") self._compute_emotion_loss(predicted_images_0, # rec images of first expressions in the ring predicted_images_1, # rec images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_synth", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=predicted_images_1.shape[0], ring_size=1) else: print("[WARNING] Cannot compute synthetic contrastive emotion loss because there is no ring!") if has_been_shuffled: new_order = codedict['new_order'] if self.deca.config.shape_constrain_type == 'shuffle_expression': # this gets tricky, in this case the images are not duplicates -> we need all, but the second # half's order is shuffled, so we need to be careful here idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["expcode"][idxs_a_synth].allclose(codedict["expcode"][idxs_b_synth]) # the expressions at corresponding index positions of idxs_a_synth and idxs_b_synth should match now self._compute_emotion_loss(predicted_images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, metrics, f"{prefix}_contrastive_synth_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) self._compute_emotion_loss(images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, 
metrics, f"{prefix}_contrastive_synth2real_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) if va is not None: codedict[f"{prefix}_valence_gt"] = va[:, 0] codedict[f"{prefix}_arousal_gt"] = va[:, 1] if expr7 is not None: codedict[f"{prefix}_expression_gt"] = expr7 if self.deca._has_neural_rendering(): assert 'emo_contrastive' not in self.deca.config.keys() or self.deca.config.emo_contrastive is False # TODO possible to make this more GPU efficient by not recomputing emotion for input image self._compute_emotion_loss(images, predicted_translated_image, losses, metrics, f"{prefix}_translated", va, expr7, with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=1) # codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion['expression'] codedict[f"{prefix}_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] codedict[f"{prefix}_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] codedict[f"{prefix}_translated_expression_output"] = self.emonet_loss.output_emotion[ 'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] return losses, metrics, codedict def _compute_loss(self, codedict, batch, training=True, testing=False): #### ----------------------- Losses losses = {} metrics = {} predicted_landmarks = codedict["predicted_landmarks"] predicted_landmarks_mediapipe = codedict.get("predicted_landmarks_mediapipe", None) if "lmk" in codedict.keys(): lmk = codedict["lmk"] else: lmk = None if "lmk_mp" in codedict.keys(): lmk_mp = codedict["lmk_mp"] else: lmk_mp = None if "masks" in codedict.keys(): masks = codedict["masks"] else: masks = None batch_size = codedict["predicted_images"].shape[0] use_geom_losses = 'use_geometric_losses_expression_exchange' in self.deca.config.keys() and \ self.deca.config.use_geometric_losses_expression_exchange if training and ('expression_constrain_type' in self.deca.config.keys() \ and ('expression_constrain_type' in self.deca.config.keys() and self.deca.config.expression_constrain_type == 'exchange') or ( 'shape_constrain_type' in self.deca.config.keys() and self.deca.config.shape_constrain_type in ['shuffle_expression', 'shuffle_shape'])) \ and (self.deca.mode == DecaMode.COARSE or self.deca.config.train_coarse) \ and (not use_geom_losses): if batch_size % 2 != 0: raise RuntimeError("The batch size should be even because it should have " f"got doubled in expression ring exchange. 
Instead it was odd: {batch_size}") # THIS IS DONE BECAUSE LANDMARK AND PHOTOMETRIC LOSSES MAKE NO SENSE FOR EXPRESSION EXCHANGE geom_losses_idxs = batch_size // 2 else: geom_losses_idxs = batch_size predicted_images = codedict["predicted_images"] images = codedict["images"] lightcode = codedict["lightcode"] albedo = codedict["albedo"] mask_face_eye = codedict["mask_face_eye"] shapecode = codedict["shapecode"] expcode = codedict["expcode"] texcode = codedict["texcode"] ops = codedict["ops"] if self.mode == DecaMode.DETAIL: uv_texture = codedict["uv_texture"] uv_texture_gt = codedict["uv_texture_gt"] # this determines the configured batch size that is currently used (training, validation or testing) # the reason why this is important is because of potential multi-gpu training and loss functions (such as Barlow Twins) # that might need the full size of the batch (not just the chunk of the current GPU). if training: bs = self.learning_params.batch_size_train rs = self.learning_params.train_K else: if not testing: bs = self.learning_params.batch_size_val rs = self.learning_params.val_K else: bs = self.learning_params.batch_size_test rs = self.learning_params.test_K ## COARSE loss only if self.mode == DecaMode.COARSE or (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # landmark losses (only useful if coarse model is being trained # if training or lmk is not None: if lmk is not None: # if self.deca.config.use_landmarks: # d = losses # else: # d = metrics d = self._metric_or_loss(losses, metrics, self.deca.config.use_landmarks) if self.deca.config.useWlmk: d['landmark'] = \ lossfunc.weighted_landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight else: d['landmark'] = \ lossfunc.landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight d = self._metric_or_loss(losses, metrics, 'use_eye_distance' not in self.deca.config.keys() or self.deca.config.use_eye_distance) # losses['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks, lmk) * self.deca.config.lmk_weight * 2 d['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.eyed d = self._metric_or_loss(losses, metrics, 'use_lip_distance' not in self.deca.config.keys() or self.deca.config.use_lip_distance) d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd d = self._metric_or_loss(losses, metrics, 'use_mouth_corner_distance' in self.deca.config.keys() and self.deca.config.use_mouth_corner_distance) d['mouth_corner_distance'] = lossfunc.mouth_corner_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd if predicted_landmarks_mediapipe is not None and lmk_mp is not None: use_mediapipe_landmarks = self.deca.config.get('use_mediapipe_landmarks', False) d = self._metric_or_loss(losses, metrics, use_mediapipe_landmarks) d['landmark_mediapipe'] =lossfunc_mp.landmark_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_eye_distance_mediapipe', False) ) d['eye_distance_mediapipe'] = lossfunc_mp.eyed_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.eyed_mp d = self._metric_or_loss(losses, metrics, 
self.deca.config.get('use_lip_distance_mediapipe', False) ) d['lip_distance_mediapipe'] = lossfunc_mp.lipd_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_mouth_corner_distance_mediapipe', False)) d['mouth_corner_distance_mediapipe'] = lossfunc_mp.mouth_corner_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp #TODO: fix this on the next iteration lipd_loss # d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks, lmk) * self.deca.config.lipd # photometric loss # if training or masks is not None: if masks is not None: # if self.deca.config.use_photometric: # d = losses # else: # d = metrics # d['photometric_texture'] = (masks * (predicted_images - images).abs()).mean() * self.deca.config.photow photometric = masks[:geom_losses_idxs, ...] * ((predicted_images[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()) if 'photometric_normalization' not in self.deca.config.keys() or self.deca.config.photometric_normalization == 'mean': photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'rel_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'neg_rel_mask_value': mu = 1. - masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'inv_rel_mask_value': mu = 1./ masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'abs_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].sum(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() else: raise ValueError(f"Invalid photometric loss normalization: '{self.deca.config.photometric_normalization}'") self._metric_or_loss(losses, metrics, self.deca.config.use_photometric)['photometric_texture'] = \ photometric * self.deca.config.photow if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_images[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_translated_image = codedict["predicted_translated_image"] photometric_translated = (masks[:geom_losses_idxs, ...] * ( predicted_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_photometric: losses['photometric_translated_texture'] = photometric_translated else: metrics['photometric_translated_texture'] = photometric_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] 
* predicted_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_translated'] = vggl * self.deca.config.vggw else: raise ValueError("Is this line ever reached?") losses = self._compute_id_loss(codedict, batch, training, testing, losses, batch_size=bs, ring_size=rs) losses['shape_reg'] = (torch.sum(shapecode ** 2) / 2) * self.deca.config.shape_reg losses['expression_reg'] = (torch.sum(expcode ** 2) / 2) * self.deca.config.exp_reg losses['tex_reg'] = (torch.sum(texcode ** 2) / 2) * self.deca.config.tex_reg losses['light_reg'] = ((torch.mean(lightcode, dim=2)[:, :, None] - lightcode) ** 2).mean() * self.deca.config.light_reg if 'original_code' in codedict.keys(): # original jaw pose regularization if self.deca.config.get('exp_deca_jaw_pose', False) and \ 'deca_jaw_reg' in self.deca.config.keys() and self.deca.config.deca_jaw_reg > 0: jaw_pose_orig = codedict['original_code']['pose'][:, 3:] jaw_pose = codedict['posecode'][..., 3:] deca_jaw_pose_reg = (torch.sum((jaw_pose - jaw_pose_orig) ** 2) / 2) * self.deca.config.deca_jaw_reg losses['deca_jaw_pose_reg'] = deca_jaw_pose_reg if self.deca.config.get('exp_deca_global_pose', False) and \ 'deca_global_reg' in self.deca.config.keys() and self.deca.config.deca_global_reg > 0: global_pose_orig = codedict['original_code']['pose'][:, :3] global_pose = codedict['posecode'][..., :3] global_pose_reg = (torch.sum((global_pose - global_pose_orig) ** 2) / 2) * self.deca.config.deca_global_reg losses['deca_global_pose_reg'] = global_pose_reg # original expression regularization if 'deca_expression_reg' in self.deca.config.keys() and self.deca.config.deca_expression_reg > 0: expression_orig = codedict['original_code']['exp'] expression = codedict['expcode'] deca_expression_reg = (torch.sum((expression - expression_orig) ** 2) / 2) * self.deca.config.deca_expression_reg losses['deca_expression_reg'] = deca_expression_reg losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse", image_key="predicted_images", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse_translated", image_key="predicted_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs ) if self.au_loss is not None: # with torch.no_grad(): self._compute_au_loss(images, predicted_images, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_translated_image, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) if self.lipread_loss is not None: # with torch.no_grad(): self._compute_lipread_loss(images, predicted_images, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_lipread_loss(images, predicted_translated_image, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and self.deca._has_neural_rendering()) ## DETAIL loss only if 
self.mode == DecaMode.DETAIL: predicted_detailed_image = codedict["predicted_detailed_image"] uv_z = codedict["uv_z"] # UV displacement map uv_shading = codedict["uv_shading"] uv_vis_mask = codedict["uv_vis_mask"] # uv_mask of what is visible photometric_detailed = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_detailed_texture'] = photometric_detailed else: metrics['photometric_detailed_texture'] = photometric_detailed if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_detailed'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_detailed_translated_image = codedict["predicted_detailed_translated_image"] photometric_detailed_translated = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_translated_detailed_texture'] = photometric_detailed_translated else: metrics['photometric_translated_detailed_texture'] = photometric_detailed_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)[ 'vgg_detailed_translated'] = vggl * self.deca.config.vggw losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail", image_key = "predicted_detailed_image", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail_translated", image_key="predicted_detailed_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) # if self.emonet_loss is not None: # self._compute_emotion_loss(images, predicted_detailed_image, losses, metrics, "detail", # with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # codedict["detail_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict["detail_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict["detail_expression_input"] = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # codedict["detail_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # # if va is not None: # codedict["detail_valence_gt"] = va[:,0] # codedict["detail_arousal_gt"] = va[:,1] # if expr7 is not 
None: # codedict["detail_expression_gt"] = expr7 # if self.deca._has_neural_rendering(): # #TODO possible to make this more GPU efficient by not recomputing emotion for input image # self._compute_emotion_loss(images, predicted_detailed_translated_image, # losses, metrics, "detail_translated", # va, expr7, # with_grad= self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # # # codedict["coarse_valence_input"] = self.emonet_loss.input_emotion['valence'] # # codedict["coarse_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # # codedict["coarse_expression_input"] = self.emonet_loss.input_emotion['expression'] # codedict["detail_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_translated_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] if self.au_loss is not None: self._compute_au_loss(images, predicted_images, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_detailed_translated_image, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) for pi in range(3): # self.deca.face_attr_mask.shape[0]): if self.deca.config.sfsw[pi] != 0: # if pi==0: new_size = 256 # else: # new_size = 128 # if self.deca.config.uv_size != 256: # new_size = 128 uv_texture_patch = F.interpolate( uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_texture_gt_patch = F.interpolate( uv_texture_gt[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_vis_mask_patch = F.interpolate( uv_vis_mask[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') detail_l1 = (uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1 and not self.deca._has_neural_rendering(): losses['detail_l1_{}'.format(pi)] = detail_l1 else: metrics['detail_l1_{}'.format(pi)] = detail_l1 if self.deca.config.use_detail_mrf and not self.deca._has_neural_rendering(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_mrf_{}'.format(pi)] = mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_mrf_{}'.format(pi)] = mrf if self.deca._has_neural_rendering(): # raise NotImplementedError("Gotta implement the texture extraction first.") translated_uv_texture = codedict["translated_uv_texture"] translated_uv_texture_patch = F.interpolate( translated_uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], 
self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') translated_detail_l1 = (translated_uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1: losses['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 else: metrics['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 if self.deca.config.use_detail_mrf: translated_mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_translated_mrf_{}'.format(pi)] = translated_mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_translated_mrf_{}'.format(pi)] = mrf # Old piece of debug code. Good to delete. # if pi == 2: # uv_texture_gt_patch_ = uv_texture_gt_patch # uv_texture_patch_ = uv_texture_patch # uv_vis_mask_patch_ = uv_vis_mask_patch losses['z_reg'] = torch.mean(uv_z.abs()) * self.deca.config.zregw losses['z_diff'] = lossfunc.shading_smooth_loss(uv_shading) * self.deca.config.zdiffw nonvis_mask = (1 - util.binary_erosion(uv_vis_mask)) losses['z_sym'] = (nonvis_mask * (uv_z - torch.flip(uv_z, [-1]).detach()).abs()).sum() * self.deca.config.zsymw if self.emotion_mlp is not None:# and not testing: mlp_losses, mlp_metrics = self.emotion_mlp.compute_loss( codedict, batch, training=training, pred_prefix="emo_mlp_") for key in mlp_losses.keys(): if key in losses.keys(): raise RuntimeError(f"Duplicate loss label {key}") losses[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_losses[key] for key in mlp_metrics.keys(): if key in metrics.keys(): raise RuntimeError(f"Duplicate metric label {key}") # let's report the metrics (which are a superset of losses when it comes to EmoMLP) without the weight, # it's hard to plot the metrics otherwise metrics[key] = mlp_metrics[key] # metrics[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_metrics[key] # else: # uv_texture_gt_patch_ = None # uv_texture_patch_ = None # uv_vis_mask_patch_ = None return losses, metrics def compute_loss(self, values, batch, training=True, testing=False) -> dict: """ The function used to compute the loss on a training batch. : training should be set to true when calling from training_step only """ losses, metrics = self._compute_loss(values, batch, training=training, testing=testing) all_loss = 0. losses_key = losses.keys() for key in losses_key: all_loss = all_loss + losses[key] # losses['all_loss'] = all_loss losses = {'loss_' + key: value for key, value in losses.items()} # add prefix loss for better logging losses['loss'] = all_loss # add metrics that do not effect the loss function (if any) for key in metrics.keys(): losses['metric_' + key] = metrics[key] return losses def _val_to_be_logged(self, d): if not hasattr(self, 'val_dict_list'): self.val_dict_list = [] self.val_dict_list += [d] def _train_to_be_logged(self, d): if not hasattr(self, 'train_dict_list'): self.train_dict_list = [] self.train_dict_list += [d] def validation_step(self, batch, batch_idx, dataloader_idx=None): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. 
batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ with torch.no_grad(): training = False values = self.encode(batch, training=training) values = self.decode(values, training=training) losses_and_metrics = self.compute_loss(values, batch, training=training) #### self.log_dict(losses_and_metrics, on_step=False, on_epoch=True) # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'val_' # losses_and_metrics_to_log = {prefix + dataloader_str +'_val_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # log val_loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log[stage_str + 'loss'] = losses_and_metrics_to_log[prefix + '_' + stage_str + 'loss'] losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss # self._val_to_be_logged(losses_and_metrics_to_log) if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch # recommended if self.trainer.is_global_zero: if self.deca.config.val_vis_frequency > 0: if batch_idx % self.deca.config.val_vis_frequency == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, stage_str[:-1], prefix) vis_dict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) # image = Image(grid_image, caption="full visualization") # vis_dict[prefix + '_val_' + "visualization"] = image if isinstance(self.logger, WandbLogger): self.logger.log_metrics(vis_dict) return None def _get_logging_prefix(self): prefix = self.stage_name + str(self.mode.name).lower() return prefix def test_step(self, batch, batch_idx, dataloader_idx=None): """ Testing step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations without gradient :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. 
        :batch_idx batch index
        """
        prefix = self._get_logging_prefix()
        losses_and_metrics_to_log = {}

        # if dataloader_idx is not None:
        #     dataloader_str = str(dataloader_idx) + "_"
        # else:
        dataloader_str = ''
        stage_str = dataloader_str + 'test_'

        with torch.no_grad():
            training = False
            testing = True
            values = self.encode(batch, training=training)
            values = self.decode(values, training=training)
            if 'mask' in batch.keys():
                losses_and_metrics = self.compute_loss(values, batch, training=False, testing=testing)
                # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu() for key, value in losses_and_metrics.items()}
                losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()}
            else:
                losses_and_metrics = None

        # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch
        # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        # losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = torch.tensor(self.global_step, device=self.device)
        # losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device)
        # losses_and_metrics_to_log[stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        # losses_and_metrics_to_log[stage_str + 'step'] = torch.tensor(self.global_step, device=self.device)
        # losses_and_metrics_to_log[stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device)
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss
        losses_and_metrics_to_log[stage_str + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log[stage_str + 'step'] = self.global_step
        losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss

        if self.logger is not None:
            # self.logger.log_metrics(losses_and_metrics_to_log)
            self.log_dict(losses_and_metrics_to_log, sync_dist=True, on_step=False, on_epoch=True)

        # if self.global_step % 200 == 0:
        uv_detail_normals = None
        if 'uv_detail_normals' in values.keys():
            uv_detail_normals = values['uv_detail_normals']

        if self.deca.config.test_vis_frequency > 0:
            # Log visualizations every once in a while
            if batch_idx % self.deca.config.test_vis_frequency == 0:
                # if self.trainer.is_global_zero:
                visualizations, grid_image = self._visualization_checkpoint(values['verts'],
                                                                            values['trans_verts'],
                                                                            values['ops'],
                                                                            uv_detail_normals,
                                                                            values,
                                                                            self.global_step,
                                                                            stage_str[:-1],
                                                                            prefix)
                visdict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx,
                                                             indices=0, dataloader_idx=dataloader_idx)
                self.logger.log_metrics(visdict)

        return None

    @property
    def process(self):
        if not hasattr(self, "process_"):
            self.process_ = psutil.Process(os.getpid())
        return self.process_
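    # Editor's note: the `process` property above memoizes a psutil handle so every
    # step can log resident memory cheaply via `self.process.memory_info().rss`
    # (the '*_mem_usage' log entries). A minimal standalone equivalent of the pattern:
    #
    #     import os, psutil
    #     _proc = psutil.Process(os.getpid())
    #     rss_bytes = _proc.memory_info().rss  # resident set size, in bytes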
    def training_step(self, batch, batch_idx, *args, **kwargs):  # , debug=True):
        """
        Training step override of pytorch lightning module. It makes the encoding, decoding passes,
        computes the loss and logs the losses/visualizations.
        :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size].
        For a training forward pass, additional corresponding data are necessary such as 'landmarks' and 'masks'.
        :batch_idx batch index
        """
        values = self.encode(batch, training=True)
        values = self.decode(values, training=True)
        losses_and_metrics = self.compute_loss(values, batch, training=True)

        uv_detail_normals = None
        if 'uv_detail_normals' in values.keys():
            uv_detail_normals = values['uv_detail_normals']

        # prefix = str(self.mode.name).lower()
        prefix = self._get_logging_prefix()
        # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()}
        # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach() for key, value in losses_and_metrics.items()}
        losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()}
        # losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log[prefix + '_train_' + 'step'] = self.global_step
        losses_and_metrics_to_log[prefix + '_train_' + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log[prefix + '_' + "train_" + 'mem_usage'] = self.process.memory_info().rss
        # losses_and_metrics_to_log['train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        losses_and_metrics_to_log['train_' + 'epoch'] = self.current_epoch
        losses_and_metrics_to_log['train_' + 'step'] = self.global_step
        losses_and_metrics_to_log['train_' + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log["train_" + 'mem_usage'] = self.process.memory_info().rss

        # log loss also without any prefix for a model checkpoint to track it
        losses_and_metrics_to_log['loss'] = losses_and_metrics_to_log[prefix + '_train_loss']

        if self.logger is not None:
            self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True)  # log per epoch, # recommended

        if self.deca.config.train_vis_frequency > 0:
            if self.global_step % self.deca.config.train_vis_frequency == 0:
                if self.trainer.is_global_zero:
                    visualizations, grid_image = self._visualization_checkpoint(values['verts'],
                                                                                values['trans_verts'],
                                                                                values['ops'],
                                                                                uv_detail_normals,
                                                                                values,
                                                                                batch_idx,
                                                                                "train",
                                                                                prefix)
                    visdict = self._create_visualizations_to_log('train', visualizations, values, batch_idx, indices=0)
                    if isinstance(self.logger, WandbLogger):
                        self.logger.log_metrics(visdict)  # , step=self.global_step)
                        # self.log_dict(visdict, sync_dist=True)

        # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=False)  # log per step
        # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True)  # log per both
        # return losses_and_metrics
        return losses_and_metrics['loss']

    ### STEP ENDS ARE PROBABLY NOT NECESSARY BUT KEEP AN EYE ON THEM IF MULTI-GPU TRAINING DOESN'T WORK
    # def training_step_end(self, batch_parts):
    #     return self._step_end(batch_parts)
    #
    # def validation_step_end(self, batch_parts):
    #     return self._step_end(batch_parts)
    #
    # def _step_end(self, batch_parts):
    #     # gpu_0_prediction = batch_parts.pred[0]['pred']
    #     # gpu_1_prediction = batch_parts.pred[1]['pred']
    #     N = len(batch_parts)
    #     loss_dict = {}
    #     for key in batch_parts[0]:
    #         for i in range(N):
    #             if key not in loss_dict.keys():
    #                 loss_dict[key] = batch_parts[i]
    #             else:
    #                 loss_dict[key] = batch_parts[i]
    #             loss_dict[key] = loss_dict[key] / N
    #     return loss_dict

    def vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""):
        caption = ""
        if len(prefix) > 0:
            prefix += "_"
        if
valence is not None and not np.isnan(valence).any(): caption += prefix + "valence= %.03f\n" % valence if arousal is not None and not np.isnan(arousal).any(): caption += prefix + "arousal= %.03f\n" % arousal if affnet_expr is not None and not np.isnan(affnet_expr).any(): caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name if expr7 is not None and not np.isnan(expr7).any(): caption += prefix +"expression= %s \n" % Expression7(expr7).name return caption def _create_visualizations_to_log(self, stage, visdict, values, step, indices=None, dataloader_idx=None, output_dir=None): mode_ = str(self.mode.name).lower() prefix = self._get_logging_prefix() output_dir = output_dir or self.inout_params.full_run_dir log_dict = {} for key in visdict.keys(): images = _torch_image2np(visdict[key]) if images.dtype == np.float32 or images.dtype == np.float64 or images.dtype == np.float16: images = np.clip(images, 0, 1) if indices is None: indices = np.arange(images.shape[0]) if isinstance(indices, int): indices = [indices,] if isinstance(indices, str) and indices == 'all': image = np.concatenate([images[i] for i in range(images.shape[0])], axis=1) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_all.png') # im2log = Image(image, caption=key) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image) else: im2log = _log_array_image(savepath, image) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log else: for i in indices: caption = key + f" batch_index={step}\n" caption += key + f" index_in_batch={i}\n" if self.emonet_loss is not None: if key == 'inputs': if mode_ + "_valence_input" in values.keys(): caption += self.vae_2_str( values[mode_ + "_valence_input"][i].detach().cpu().item(), values[mode_ + "_arousal_input"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_input"][i].detach().cpu().numpy()), prefix="emonet") + "\n" if 'va' in values.keys() and mode_ + "valence_gt" in values.keys(): # caption += self.vae_2_str( # values[mode_ + "_valence_gt"][i].detach().cpu().item(), # values[mode_ + "_arousal_gt"][i].detach().cpu().item(), caption += self.vae_2_str( values[mode_ + "valence_gt"][i].detach().cpu().item(), values[mode_ + "arousal_gt"][i].detach().cpu().item(), prefix="gt") + "\n" if 'expr7' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( expr7=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" if 'affectnetexp' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" elif 'geometry_detail' in key: if "emo_mlp_valence" in values.keys(): caption += self.vae_2_str( values["emo_mlp_valence"][i].detach().cpu().item(), values["emo_mlp_arousal"][i].detach().cpu().item(), prefix="mlp") if 'emo_mlp_expr_classification' in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values["emo_mlp_expr_classification"][i].detach().cpu().argmax().numpy(), prefix="mlp") + "\n" elif key == 'output_images_' + mode_: if mode_ + "_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_valence_output"][i].detach().cpu().item(), values[mode_ + "_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_output"][i].detach().cpu().numpy())) + "\n" elif key == 
'output_translated_images_' + mode_: if mode_ + "_translated_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_translated_valence_output"][i].detach().cpu().item(), values[mode_ + "_translated_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_translated_expression_output"][i].detach().cpu().numpy())) + "\n" # elif key == 'output_images_detail': # caption += "\n" + self.vae_2_str(values["detail_output_valence"][i].detach().cpu().item(), # values["detail_output_valence"][i].detach().cpu().item(), # np.argmax(values["detail_output_expression"][ # i].detach().cpu().numpy())) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_{i:02d}.png') image = images[i] # im2log = Image(image, caption=caption) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image, caption) elif self.logger is not None: im2log = _log_array_image(savepath, image, caption) else: im2log = _log_array_image(None, image, caption) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log return log_dict def _visualization_checkpoint(self, verts, trans_verts, ops, uv_detail_normals, additional, batch_idx, stage, prefix, save=False): batch_size = verts.shape[0] visind = np.arange(batch_size) shape_images = self.deca.render.render_shape(verts, trans_verts) if uv_detail_normals is not None: detail_normal_images = F.grid_sample(uv_detail_normals.detach(), ops['grid'].detach(), align_corners=False) shape_detail_images = self.deca.render.render_shape(verts, trans_verts, detail_normal_images=detail_normal_images) else: shape_detail_images = None visdict = {} if 'images' in additional.keys(): visdict['inputs'] = additional['images'][visind] if 'images' in additional.keys() and 'lmk' in additional.keys(): visdict['landmarks_gt'] = util.tensor_vis_landmarks(additional['images'][visind], additional['lmk'][visind]) if 'images' in additional.keys() and 'predicted_landmarks' in additional.keys(): visdict['landmarks_predicted'] = util.tensor_vis_landmarks(additional['images'][visind], additional['predicted_landmarks'][visind]) if 'predicted_images' in additional.keys(): visdict['output_images_coarse'] = additional['predicted_images'][visind] if 'predicted_translated_image' in additional.keys() and additional['predicted_translated_image'] is not None: visdict['output_translated_images_coarse'] = additional['predicted_translated_image'][visind] visdict['geometry_coarse'] = shape_images[visind] if shape_detail_images is not None: visdict['geometry_detail'] = shape_detail_images[visind] if 'albedo_images' in additional.keys(): visdict['albedo_images'] = additional['albedo_images'][visind] if 'masks' in additional.keys(): visdict['mask'] = additional['masks'].repeat(1, 3, 1, 1)[visind] if 'albedo' in additional.keys(): visdict['albedo'] = additional['albedo'][visind] if 'predicted_detailed_image' in additional.keys() and additional['predicted_detailed_image'] is not None: visdict['output_images_detail'] = additional['predicted_detailed_image'][visind] if 'predicted_detailed_translated_image' in additional.keys() and additional['predicted_detailed_translated_image'] is not None: visdict['output_translated_images_detail'] = additional['predicted_detailed_translated_image'][visind] if 'shape_detail_images' in additional.keys(): visdict['shape_detail_images'] = additional['shape_detail_images'][visind] if 'uv_detail_normals' in additional.keys(): 
visdict['uv_detail_normals'] = additional['uv_detail_normals'][visind] * 0.5 + 0.5 if 'uv_texture_patch' in additional.keys(): visdict['uv_texture_patch'] = additional['uv_texture_patch'][visind] if 'uv_texture_gt' in additional.keys(): visdict['uv_texture_gt'] = additional['uv_texture_gt'][visind] if 'translated_uv_texture' in additional.keys() and additional['translated_uv_texture'] is not None: visdict['translated_uv_texture'] = additional['translated_uv_texture'][visind] if 'uv_vis_mask_patch' in additional.keys(): visdict['uv_vis_mask_patch'] = additional['uv_vis_mask_patch'][visind] if save: savepath = f'{self.inout_params.full_run_dir}/{prefix}_{stage}/combined/{self.current_epoch:04d}_{batch_idx:04d}.png' Path(savepath).parent.mkdir(exist_ok=True, parents=True) visualization_image = self.deca.visualize(visdict, savepath) return visdict, visualization_image[..., [2, 1, 0]] else: visualization_image = None return visdict, None def _get_trainable_parameters(self): trainable_params = [] if self.mode == DecaMode.COARSE: trainable_params += self.deca._get_coarse_trainable_parameters() elif self.mode == DecaMode.DETAIL: trainable_params += self.deca._get_detail_trainable_parameters() else: raise ValueError(f"Invalid deca mode: {self.mode}") if self.emotion_mlp is not None: trainable_params += list(self.emotion_mlp.parameters()) if self.emonet_loss is not None: trainable_params += self.emonet_loss._get_trainable_params() if self.deca.id_loss is not None: trainable_params += self.deca.id_loss._get_trainable_params() return trainable_params def configure_optimizers(self): # optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) print("Configuring optimizer") trainable_params = self._get_trainable_parameters() if self.learning_params.optimizer == 'Adam': self.deca.opt = torch.optim.Adam( trainable_params, lr=self.learning_params.learning_rate, amsgrad=False) elif self.config.learning.optimizer == 'AdaBound': self.deca.opt = adabound.AdaBound( trainable_params, lr=self.config.learning.learning_rate, final_lr=self.config.learning.final_learning_rate ) elif self.learning_params.optimizer == 'SGD': self.deca.opt = torch.optim.SGD( trainable_params, lr=self.learning_params.learning_rate) else: raise ValueError(f"Unsupported optimizer: '{self.learning_params.optimizer}'") optimizers = [self.deca.opt] schedulers = [] if 'learning_rate_decay' in self.learning_params.keys(): scheduler = torch.optim.lr_scheduler.ExponentialLR(self.deca.opt, gamma=self.learning_params.learning_rate_decay) schedulers += [scheduler] if len(schedulers) == 0: return self.deca.opt return optimizers, schedulers class DECA(torch.nn.Module): """ The original DECA class which contains the encoders, FLAME decoder and the detail decoder. 
""" def __init__(self, config): """ :config corresponds to a model_params from DecaModule """ super().__init__() # ID-MRF perceptual loss (kept here from the original DECA implementation) self.perceptual_loss = None # Face Recognition loss self.id_loss = None # VGG feature loss self.vgg_loss = None self._reconfigure(config) self._reinitialize() def _dirty_init(self): pass # not used here, implemented for EMICA def get_input_image_size(self): return (self.config.image_size, self.config.image_size) def _reconfigure(self, config): self.config = config self.n_param = config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light # identity-based detail code self.n_detail = config.n_detail # emotion-based detail code (deprecated, not use by DECA or EMOCA) self.n_detail_emo = config.n_detail_emo if 'n_detail_emo' in config.keys() else 0 # count the size of the conidition vector if 'detail_conditioning' in self.config.keys(): self.n_cond = 0 if 'globalpose' in self.config.detail_conditioning: self.n_cond += 3 if 'jawpose' in self.config.detail_conditioning: self.n_cond += 3 if 'identity' in self.config.detail_conditioning: self.n_cond += config.n_shape if 'expression' in self.config.detail_conditioning: self.n_cond += config.n_exp else: self.n_cond = 3 + config.n_exp self.mode = DecaMode[str(config.mode).upper()] self._create_detail_generator() self._init_deep_losses() self._setup_neural_rendering() def _reinitialize(self): self._create_model() self._setup_renderer() self._init_deep_losses() self.face_attr_mask = util.load_local_mask(image_size=self.config.uv_size, mode='bbx') def _get_num_shape_params(self): return self.config.n_shape def _init_deep_losses(self): """ Initialize networks for deep losses """ # TODO: ideally these networks should be moved out the DECA class and into DecaModule, # but that would break backwards compatility with the original DECA and would not be able to load DECA's weights if 'mrfwr' not in self.config.keys() or self.config.mrfwr == 0: self.perceptual_loss = None else: if self.perceptual_loss is None: self.perceptual_loss = lossfunc.IDMRFLoss().eval() self.perceptual_loss.requires_grad_(False) # TODO, move this to the constructor if 'idw' not in self.config.keys() or self.config.idw == 0: self.id_loss = None else: if self.id_loss is None: id_metric = self.config.id_metric if 'id_metric' in self.config.keys() else None id_trainable = self.config.id_trainable if 'id_trainable' in self.config.keys() else False self.id_loss_start_step = self.config.id_loss_start_step if 'id_loss_start_step' in self.config.keys() else 0 self.id_loss = lossfunc.VGGFace2Loss(self.config.pretrained_vgg_face_path, id_metric, id_trainable) self.id_loss.freeze_nontrainable_layers() if 'vggw' not in self.config.keys() or self.config.vggw == 0: self.vgg_loss = None else: if self.vgg_loss is None: vgg_loss_batch_norm = 'vgg_loss_batch_norm' in self.config.keys() and self.config.vgg_loss_batch_norm self.vgg_loss = VGG19Loss(dict(zip(self.config.vgg_loss_layers, self.config.lambda_vgg_layers)), batch_norm=vgg_loss_batch_norm).eval() self.vgg_loss.requires_grad_(False) # TODO, move this to the constructor def _setup_renderer(self): self.render = SRenderY(self.config.image_size, obj_filename=self.config.topology_path, uv_size=self.config.uv_size) # .to(self.device) # face mask for rendering details mask = imread(self.config.face_mask_path).astype(np.float32) / 255. 
mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() self.uv_face_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) mask = imread(self.config.face_eye_mask_path).astype(np.float32) / 255. mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() uv_face_eye_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) self.register_buffer('uv_face_eye_mask', uv_face_eye_mask) # displacement mask is deprecated and not used by DECA or EMOCA if 'displacement_mask' in self.config.keys(): displacement_mask_ = 1-np.load(self.config.displacement_mask).astype(np.float32) # displacement_mask_ = np.load(self.config.displacement_mask).astype(np.float32) displacement_mask_ = torch.from_numpy(displacement_mask_)[None, None, ...].contiguous() displacement_mask_ = F.interpolate(displacement_mask_, [self.config.uv_size, self.config.uv_size]) self.register_buffer('displacement_mask', displacement_mask_) ## displacement correct if os.path.isfile(self.config.fixed_displacement_path): fixed_dis = np.load(self.config.fixed_displacement_path) fixed_uv_dis = torch.tensor(fixed_dis).float() else: fixed_uv_dis = torch.zeros([512, 512]).float() print("Warning: fixed_displacement_path not found, using zero displacement") self.register_buffer('fixed_uv_dis', fixed_uv_dis) def uses_texture(self): if 'use_texture' in self.config.keys(): return self.config.use_texture return True # true by default def _disable_texture(self, remove_from_model=False): self.config.use_texture = False if remove_from_model: self.flametex = None def _enable_texture(self): self.config.use_texture = True def _has_neural_rendering(self): return hasattr(self.config, "neural_renderer") and bool(self.config.neural_renderer) def _setup_neural_rendering(self): if self._has_neural_rendering(): if self.config.neural_renderer.class_ == "StarGAN": print("Creating StarGAN neural renderer") self.image_translator = StarGANWrapper(self.config.neural_renderer.cfg, self.config.neural_renderer.stargan_repo) else: raise ValueError(f"Unsupported neural renderer class '{self.config.neural_renderer.class_}'") if self.image_translator.background_mode == "input": if self.config.background_from_input not in [True, "input"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be inpainted from the input") elif self.image_translator.background_mode == "black": if self.config.background_from_input not in [False, "black"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be black.") elif self.image_translator.background_mode == "none": if self.config.background_from_input not in ["none"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. 
" "The background should not be handled") else: raise NotImplementedError(f"Unsupported mode of the neural renderer backroungd: " f"'{self.image_translator.background_mode}'") def _create_detail_generator(self): #backwards compatibility hack: if hasattr(self, 'D_detail'): if (not "detail_conditioning_type" in self.config.keys() or self.config.detail_conditioning_type == "concat") \ and isinstance(self.D_detail, Generator): return if self.config.detail_conditioning_type == "adain" and isinstance(self.D_detail, GeneratorAdaIn): return print("[WARNING]: We are reinitializing the detail generator!") del self.D_detail # just to make sure we free the CUDA memory, probably not necessary if not "detail_conditioning_type" in self.config.keys() or str(self.config.detail_conditioning_type).lower() == "concat": # concatenates detail latent and conditioning (this one is used by DECA/EMOCA) print("Creating classic detail generator.") self.D_detail = Generator(latent_dim=self.n_detail + self.n_detail_emo + self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') elif str(self.config.detail_conditioning_type).lower() == "adain": # conditioning passed in through adain layers (this one is experimental and not currently used) print("Creating AdaIn detail generator.") self.D_detail = GeneratorAdaIn(self.n_detail + self.n_detail_emo, self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') else: raise NotImplementedError(f"Detail conditioning invalid: '{self.config.detail_conditioning_type}'") def _create_model(self): # 1) build coarse encoder e_flame_type = 'ResnetEncoder' if 'e_flame_type' in self.config.keys(): e_flame_type = self.config.e_flame_type if e_flame_type == 'ResnetEncoder': self.E_flame = ResnetEncoder(outsize=self.n_param) elif e_flame_type[:4] == 'swin': self.E_flame = SwinEncoder(outsize=self.n_param, img_size=self.config.image_size, swin_type=e_flame_type) else: raise ValueError(f"Invalid 'e_flame_type' = {e_flame_type}") flame_cfg = copy.deepcopy(self.config) flame_cfg.n_shape = self._get_num_shape_params() if 'flame_mediapipe_lmk_embedding_path' not in flame_cfg.keys(): self.flame = FLAME(flame_cfg) else: self.flame = FLAME_mediapipe(flame_cfg) if self.uses_texture(): self.flametex = FLAMETex(self.config) else: self.flametex = None # 2) build detail encoder e_detail_type = 'ResnetEncoder' if 'e_detail_type' in self.config.keys(): e_detail_type = self.config.e_detail_type if e_detail_type == 'ResnetEncoder': self.E_detail = ResnetEncoder(outsize=self.n_detail + self.n_detail_emo) elif e_flame_type[:4] == 'swin': self.E_detail = SwinEncoder(outsize=self.n_detail + self.n_detail_emo, img_size=self.config.image_size, swin_type=e_detail_type) else: raise ValueError(f"Invalid 'e_detail_type'={e_detail_type}") self._create_detail_generator() # self._load_old_checkpoint() def _get_coarse_trainable_parameters(self): print("Add E_flame.parameters() to the optimizer") return list(self.E_flame.parameters()) def _get_detail_trainable_parameters(self): trainable_params = [] if self.config.train_coarse: trainable_params += self._get_coarse_trainable_parameters() print("Add E_flame.parameters() to the optimizer") trainable_params += list(self.E_detail.parameters()) print("Add E_detail.parameters() to the optimizer") trainable_params += list(self.D_detail.parameters()) print("Add D_detail.parameters() to the optimizer") return trainable_params def train(self, mode: bool = True): super().train(mode) if mode: if self.mode == DecaMode.COARSE: self.E_flame.train() # print("Setting 
E_flame to train") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") elif self.mode == DecaMode.DETAIL: if self.config.train_coarse: # print("Setting E_flame to train") self.E_flame.train() else: # print("Setting E_flame to eval") self.E_flame.eval() self.E_detail.train() # print("Setting E_detail to train") self.D_detail.train() # print("Setting D_detail to train") else: raise ValueError(f"Invalid mode '{self.mode}'") else: self.E_flame.eval() # print("Setting E_flame to eval") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") # these are set to eval no matter what, they're never being trained (the FLAME shape and texture spaces are pretrained) self.flame.eval() if self.flametex is not None: self.flametex.eval() return self def _load_old_checkpoint(self): """ Loads the DECA model weights from the original DECA implementation: https://github.com/YadiraF/DECA """ if self.config.resume_training: model_path = self.config.pretrained_modelpath print(f"Loading model state from '{model_path}'") checkpoint = torch.load(model_path) # model util.copy_state_dict(self.E_flame.state_dict(), checkpoint['E_flame']) # util.copy_state_dict(self.opt.state_dict(), checkpoint['opt']) # deprecate # detail model if 'E_detail' in checkpoint.keys(): util.copy_state_dict(self.E_detail.state_dict(), checkpoint['E_detail']) util.copy_state_dict(self.D_detail.state_dict(), checkpoint['D_detail']) # training state self.start_epoch = 0 # checkpoint['epoch'] self.start_iter = 0 # checkpoint['iter'] else: print('Start training from scratch') self.start_epoch = 0 self.start_iter = 0 def _encode_flame(self, images, **kwargs): return self.E_flame(images) def decompose_code(self, code): ''' config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light ''' code_list = [] # num_list = [self.config.n_shape, self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, # self.config.n_light] num_list = [self._get_num_shape_params(), self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, self.config.n_light] start = 0 for i in range(len(num_list)): code_list.append(code[:, start:start + num_list[i]]) start = start + num_list[i] # shapecode, texcode, expcode, posecode, cam, lightcode = code_list code_list[-1] = code_list[-1].reshape(code.shape[0], 9, 3) return code_list, None def displacement2normal(self, uv_z, coarse_verts, coarse_normals, detach=True): """ Converts the displacement uv map (uv_z) and coarse_verts to a normal map coarse_normals. 
""" batch_size = uv_z.shape[0] uv_coarse_vertices = self.render.world2uv(coarse_verts)#.detach() if detach: uv_coarse_vertices = uv_coarse_vertices.detach() uv_coarse_normals = self.render.world2uv(coarse_normals)#.detach() if detach: uv_coarse_normals = uv_coarse_normals.detach() uv_z = uv_z * self.uv_face_eye_mask # detail vertices = coarse vertice + predicted displacement*normals + fixed displacement*normals uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode ='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}. 
Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors = dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different sub class but retain the EMOCA functionality. See EMICA_v2 for an example. """ def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated)
self.E_expression = EmoNetRegressor(self.n_exp_param)
21
2023-11-07 20:13:32+00:00
24k
codefuse-ai/Collinear-Constrained-Attention
model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, param in model.named_parameters():\n num += param.nelement()\n return num" }, { "identifier": "GPTNeoXConfig", "path": "model/gpt_neox/configuration_gpt_neox.py", "snippet": "class GPTNeoXConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an\n GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration\n with the defaults will yield a similar configuration to that of the GPTNeoX\n [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50432):\n Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`GPTNeoXModel`].\n hidden_size (`int`, *optional*, defaults to 6144):\n Dimension of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 44):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 64):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 24576):\n Dimension of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n rotary_pct (`float`, *optional*, defaults to 0.25):\n percentage of hidden dimensions to allocate to rotary embeddings\n rotary_emb_base (`int`, *optional*, defaults to 10000)\n base for computing rotary embeddings frequency\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 1e-5):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n use_parallel_residual (`bool`, *optional*, defaults to `True`):\n Whether to use a \"parallel\" formulation in each Transformer layer, which can provide a slight training\n speedup at large scales (e.g. 20B).\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports three scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. 
When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\n experimental feature, subject to breaking API changes in future versions.\n Example:\n\n ```python\n >>> from transformers import GPTNeoXConfig, GPTNeoXModel\n\n >>> # Initializing a GPTNeoX gpt-neox-20b style configuration\n >>> configuration = GPTNeoXConfig()\n\n >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration\n >>> model = GPTNeoXModel(configuration) # doctest: +SKIP\n\n >>> # Accessing the model configuration\n >>> configuration = model.config # doctest: +SKIP\n ```\"\"\"\n model_type = \"gpt_neox\"\n\n def __init__(\n self,\n vocab_size=50432,\n hidden_size=6144,\n num_hidden_layers=44,\n num_attention_heads=64,\n intermediate_size=24576,\n hidden_act=\"gelu\",\n rotary_pct=0.25,\n rotary_emb_base=10000,\n max_position_embeddings=2048,\n initializer_range=0.02,\n layer_norm_eps=1e-5,\n use_cache=True,\n bos_token_id=0,\n eos_token_id=2,\n tie_word_embeddings=False,\n use_parallel_residual=True,\n rope_scaling=None,\n **kwargs\n ):\n super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.use_cache = use_cache\n self.tie_word_embeddings = tie_word_embeddings\n self.use_parallel_residual = use_parallel_residual\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! 
Make sure to update them!\"\n )\n\n # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "GPTNeoXForCausalLM", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):\n\n # _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.gpt_neox = GPTNeoXModel(config)\n self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.embed_out\n\n def set_output_embeddings(self, new_embeddings):\n self.embed_out = new_embeddings\n\n @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
The two additional tensors are\n only required when the model is used as a decoder in a Sequence to Sequence model.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n >>> config = GPTNeoXConfig.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n >>> config.is_decoder = True\n >>> model = GPTNeoXForCausalLM.from_pretrained(\"EleutherAI/gpt-neox-20b\", config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.gpt_neox(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n lm_logits = self.embed_out(hidden_states)\n\n lm_loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(lm_logits.device)\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shift_logits = lm_logits[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n input_shape = input_ids.shape\n\n # cut decoder_input_ids if past is used\n if past_key_values and past_key_values[0] is not None:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if 
past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"attention_mask\": attention_mask,\n \"past_key_values\": past_key_values,\n \"position_ids\": position_ids,\n }\n )\n\n return model_inputs\n\n def _reorder_cache(self, past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "GPTNeoXTokenizerFast", "path": "model/gpt_neox/tokenization_gpt_neox_fast.py", "snippet": "class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):\n \"\"\"\n Construct a \"fast\" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level\n Byte-Pair-Encoding.\n\n This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will\n be encoded differently whether it is at the beginning of the sentence (without space) or not:\n\n ```python\n >>> from transformers import GPTNeoXTokenizerFast\n\n >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained(\"gpt2\")\n >>> tokenizer(\"Hello world\")[\"input_ids\"]\n [15496, 995]\n\n >>> tokenizer(\" Hello world\")[\"input_ids\"]\n [18435, 995]\n ```\n\n You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since\n the model was not pretrained this way, it might yield a decrease in performance.\n\n <Tip>\n\n When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.\n\n </Tip>\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n unk_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The beginning of sequence token.\n eos_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The end of sequence token.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. 
(GPTNeoX tokenizer detect beginning of words by the preceding space).\n trim_offsets (`bool`, *optional*, defaults to `True`):\n Whether or not the post-processing step should trim offsets to avoid including whitespaces.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file=None,\n merges_file=None,\n tokenizer_file=None,\n unk_token=\"<|endoftext|>\",\n bos_token=\"<|endoftext|>\",\n eos_token=\"<|endoftext|>\",\n add_prefix_space=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file,\n merges_file,\n tokenizer_file=tokenizer_file,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n add_prefix_space=add_prefix_space,\n **kwargs,\n )\n\n pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())\n if pre_tok_state.get(\"add_prefix_space\", add_prefix_space) != add_prefix_space:\n pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop(\"type\"))\n pre_tok_state[\"add_prefix_space\"] = add_prefix_space\n self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)\n\n self.add_prefix_space = add_prefix_space\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n files = self._tokenizer.model.save(save_directory, name=filename_prefix)\n return tuple(files)\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n \"\"\"This corresponds to DialoGPT variants of models.\"\"\"\n input_ids = []\n for is_user, text in conversation.iter_texts():\n input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])\n\n if len(input_ids) > self.model_max_length:\n input_ids = input_ids[-self.model_max_length :]\n return input_ids" }, { "identifier": "LlamaConfig", "path": "model/llama/configuration_llama.py", "snippet": "class LlamaConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the LLaMA-7B.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 11008):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. 
When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details checkout [this\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\n `num_attention_heads`.\n pretraining_tp (`int`, *optional*, defaults to `1`):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\n issue](https://github.com/pytorch/pytorch/issues/76232).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. 
This is an\n experimental feature, subject to breaking API changes in future versions.\n\n Example:\n\n ```python\n >>> from transformers import LlamaModel, LlamaConfig\n\n >>> # Initializing a LLaMA llama-7b style configuration\n >>> configuration = LlamaConfig()\n\n >>> # Initializing a model from the llama-7b style configuration\n >>> model = LlamaModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"llama\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_scaling=None,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n\n # for backward compatibility\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.pretraining_tp = pretraining_tp\n self.use_cache = use_cache\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )\n\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "LlamaForCausalLM", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def 
get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n 
logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "model/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n legacy (`bool`, *optional*, defaults to `True`):\n Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622\n which includes fixes to properly handle tokens that appear after special tokens. 
A simple example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for\n more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n legacy=legacy,\n **kwargs,\n )\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to\"\n \" read the related pull request available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly.\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(vocab_file)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", **kwargs) -> List[str]:\n # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at\n # the beginning of the text\n if not self.legacy:\n text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \")\n return super().tokenize(text, **kwargs)\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n Since the sentencepiece internal model always adds a SPIECE_UNDERLINE, at the beginning of the provided text,\n we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`\n function is called with specials tokens: the input is split on the special tokens, and each subsequence is\n passed to `_tokenize`. 
Thus if a subsequence did not start with a `\" \"` or SPIECE_UNDERLINE, we have to remove\n the extra `SPIECE_UNDERLINE` prepended.\n \"\"\"\n if not self.legacy:\n is_first = text.startswith(SPIECE_UNDERLINE)\n if is_first:\n text = text[1:]\n\n tokens = self.sp_model.encode(text, out_type=str)\n\n if not self.legacy and not is_first and not text.startswith(\" \") and tokens[0].startswith(SPIECE_UNDERLINE):\n tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]\n return tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n r\"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 septs?\"\n ... 
) # doctest: +IGNORE_RESULT\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens: List[int] = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" }, { "identifier": "LlamaTokenizerFast", "path": "model/llama/tokenization_llama_fast.py", "snippet": "class LlamaTokenizerFast(PreTrainedTokenizerFast):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.\n\n This uses notably ByteFallback and no normalization.\n\n ```\n from transformers import LlamaTokenizerFast\n\n tokenizer = LlamaTokenizerFast.from_pretrained(\"hf-internal-testing/llama-tokenizer\")\n tokenizer.encode(\"Hello this is a test\")\n >>> [1, 15043, 445, 338, 263, 1243]\n ```\n\n If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or\n call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the\n values of the first token and final token of an encoded sequence will not be correct). For more details, checkout\n [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation.\n\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n tokenizer_file (`str`):\n [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that\n contains everything needed to load the tokenizer.\n\n clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`):\n Wether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra\n spaces.\n\n bos_token (`str`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. 
Can be used a sequence classifier token.\n\n eos_token (`str`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n\n unk_token (`str`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n slow_tokenizer_class = LlamaTokenizer\n padding_side = \"left\"\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file=None,\n tokenizer_file=None,\n clean_up_tokenization_spaces=False,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n add_bos_token=True,\n add_eos_token=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file=vocab_file,\n tokenizer_file=tokenizer_file,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n **kwargs,\n )\n self._add_bos_token = add_bos_token\n self._add_eos_token = add_eos_token\n self.update_post_processor()\n\n self.vocab_file = vocab_file\n self.can_save_slow_tokenizer = False if not self.vocab_file else True\n\n def update_post_processor(self):\n \"\"\"\n Updates the underlying post processor with the current `bos_token` and `eos_token`.\n \"\"\"\n bos = self.bos_token\n bos_token_id = self.bos_token_id\n\n eos = self.eos_token\n eos_token_id = self.eos_token_id\n\n single = f\"{(bos+':0 ') * self.add_bos_token}$A:0{(' '+eos+':0') * self.add_eos_token}\"\n pair = f\"{single}{(' '+bos+':1') * self.add_bos_token} $B:1{(' '+eos+':1') * self.add_eos_token}\"\n\n special_tokens = []\n if self.add_bos_token:\n special_tokens.append((bos, bos_token_id))\n if self.add_eos_token:\n special_tokens.append((eos, eos_token_id))\n self._tokenizer.post_processor = processors.TemplateProcessing(\n single=single, pair=pair, special_tokens=special_tokens\n )\n\n @property\n def add_eos_token(self):\n return self._add_eos_token\n\n @property\n def add_bos_token(self):\n return self._add_bos_token\n\n @add_eos_token.setter\n def add_eos_token(self, value):\n self._add_eos_token = value\n self.update_post_processor()\n\n @add_bos_token.setter\n def add_bos_token(self, value):\n self._add_bos_token = value\n self.update_post_processor()\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n if not self.can_save_slow_tokenizer:\n raise ValueError(\n \"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow \"\n \"tokenizer.\"\n )\n\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\"):\n \"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. 
If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS`; use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 steps?\"\n ... )\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" }, { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "def print_rank_0(*message):\n \"\"\"If distributed is initialized print only on rank 0.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(*message, flush=True)\n else:\n print(*message, flush=True)" }, { "identifier": "is_old_version", "path": "utils/common_utils.py", "snippet": "def is_old_version(path):\n new_vocab_files = ['merge.model']\n new_vocab_file_exists = []\n for filename in new_vocab_files:\n if not os.path.exists(os.path.join(path, filename)):\n new_vocab_file_exists.append(False)\n else:\n new_vocab_file_exists.append(True)\n if all(new_vocab_file_exists):\n return False\n if any(new_vocab_file_exists):\n return 'new_version_file_absent'\n else:\n return True" }, { "identifier": "build_tokenizer", "path": "tokenizer/tokenizer.py", "snippet": "def build_tokenizer(args):\n \"\"\"Initialize tokenizer.\"\"\"\n print_rank_0(\"> building {} tokenizer ...\".format(args.tokenizer_type))\n # if args.rank == 0:\n # print(\"> building {} tokenizer ...\".format(args.tokenizer_type), flush=True)\n\n # Select and instantiate the tokenizer.\n if args.tokenizer_type.lower() == \"GPT2BPETokenizer\".lower():\n assert args.vocab_file is not None\n assert args.merge_file is not None\n tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)\n elif args.tokenizer_type.lower() == \"SPMTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = SentencePieceTokenizer(args.vocab_file)\n 
elif args.tokenizer_type.lower() == \"HFTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = HFTokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"HFGPT2Tokenizer\".lower():\n if args.vocab_file is None:\n print(\n \"WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer\"\n )\n tokenizer = HFGPT2Tokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"CharLevelTokenizer\".lower():\n tokenizer = CharLevelTokenizer(vocab_size=512)\n elif args.tokenizer_type.lower() == \"TiktokenTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = TiktokenTokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"GLMTokenizer\".lower():\n if is_old_version(args.pretrained_model_path):\n print('is an old version')\n from model.glm.tokenization_glm_deprecated import GLMChineseTokenizer\n args.glm_mask = '[sMASK]'\n old_version_tokenizer = True\n tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)\n else:\n print('is not an old version')\n old_version_tokenizer = False\n tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)\n else:\n raise NotImplementedError(\n \"{} tokenizer is not \" \"implemented.\".format(args.tokenizer_type)\n )\n\n # Add vocab size.\n args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)\n\n return tokenizer" }, { "identifier": "HFTokenizer", "path": "tokenizer/tokenizer.py", "snippet": "class HFTokenizer(AbstractTokenizer):\n \"\"\"Designed to Integrate HF's Tokenizer library.\"\"\"\n\n def __init__(self, vocab_file):\n name = \"HFTokenizer\"\n super().__init__(name)\n\n self.tokenizer = Tokenizer.from_file(vocab_file)\n # self.eod_id = self.tokenizer.token_to_id(\"<|endoftext|>\")\n self.eod_id = self.tokenizer.token_to_id(\"<|end|>\")\n # self.pad_id = self.tokenizer.token_to_id(\"<|padding|>\")\n \n # 新词表没有<|padding|>, 用<|extratoken_1|>代替,和tokenization一致\n # self.pad_id = self.tokenizer.token_to_id(\"<|extratoken_1|>\")\n self.pad_id = self.tokenizer.token_to_id(\"<|pad|>\")\n\n @property\n def vocab_size(self):\n return self.tokenizer.get_vocab_size()\n\n @property\n def vocab(self):\n return self.tokenizer.get_vocab()\n\n @property\n def inv_vocab(self):\n return self.tokenizer.decoder\n\n def tokenize(self, text: str):\n return self.tokenizer.encode(text).ids\n\n def tokenize_batch(self, text_batch: Union[List[str], str]):\n return self.tokenizer.encode_batch(text_batch)\n\n def detokenize(self, token_ids):\n return self.tokenizer.decode(token_ids)\n\n @property\n def eod(self):\n return self.eod_id" }, { "identifier": "prepare_model_for_kbit_training", "path": "model/peft/utils/others.py", "snippet": "def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True):\n r\"\"\"\n This method wraps the entire protocol for preparing a model before running a training. 
This includes:\n 1- casts the layernorm in fp32, 2- makes the output embedding layer require grads, 3- adds the upcasting of the lm\n head to fp32\n\n Args:\n model (`transformers.PreTrainedModel`):\n The loaded model from `transformers`\n \"\"\"\n loaded_in_kbit = getattr(model, \"is_loaded_in_8bit\", False) or getattr(model, \"is_loaded_in_4bit\", False)\n\n for name, param in model.named_parameters():\n # freeze base model's layers\n param.requires_grad = False\n \n # cast all non INT8 parameters to fp32\n for param in model.parameters():\n if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):\n param.data = param.data.to(torch.float32)\n \n if loaded_in_kbit and use_gradient_checkpointing:\n # For backward compatibility\n if hasattr(model, \"enable_input_require_grads\"):\n model.enable_input_require_grads()\n else:\n \n def make_inputs_require_grad(module, input, output):\n output.requires_grad_(True)\n\n model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)\n\n # enable gradient checkpointing for memory efficiency\n model.gradient_checkpointing_enable()\n\n return model" }, { "identifier": "AdaLoraConfig", "path": "model/peft/tuner/adalora.py", "snippet": "class AdaLoraConfig(LoraConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`~peft.AdaLora`].\n\n Args:\n target_r (`int`): The target average rank of incremental matrix.\n init_r (`int`): The initial rank for each incremental matrix.\n tinit (`int`): The steps of initial fine-tuning warmup.\n tfinal (`int`): The step of final fine-tuning.\n deltaT (`int`): The time interval between two budget allocations.\n beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.\n beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.\n orth_reg_weight (`float`): The coefficient of orthogonal regularization.\n total_step (`int`): The total training steps that should be specified before training.\n rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.\n \"\"\"\n\n target_r: int = field(default=8, metadata={\"help\": \"Target Lora matrix dimension.\"})\n init_r: int = field(default=12, metadata={\"help\": \"Initial Lora matrix dimension.\"})\n tinit: int = field(default=0, metadata={\"help\": \"The steps of initial warmup.\"})\n tfinal: int = field(default=0, metadata={\"help\": \"The steps of final warmup.\"})\n deltaT: int = field(default=1, metadata={\"help\": \"Step interval of rank allocation.\"})\n beta1: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n beta2: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n orth_reg_weight: float = field(default=0.5, metadata={\"help\": \"The orthogonal regularization coefficient.\"})\n total_step: Optional[int] = field(default=None, metadata={\"help\": \"The total training steps.\"})\n rank_pattern: Optional[dict] = field(default=None, metadata={\"help\": \"The saved rank pattern.\"})\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.ADALORA" } ]
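The `prepare_model_for_kbit_training` snippet above reduces to two passes over the parameters: freeze everything, then upcast any half-precision weights. A minimal sketch of that pattern, assuming only plain `torch` and a toy module (no bitsandbytes, no actual k-bit loading):

```python
import torch
from torch import nn

# Toy stand-in for a pretrained model loaded in half precision.
model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2)).half()

# Pass 1: freeze the base model's layers.
for param in model.parameters():
    param.requires_grad = False

# Pass 2: cast fp16/bf16 parameters to fp32 for training stability.
for param in model.parameters():
    if param.dtype in (torch.float16, torch.bfloat16):
        param.data = param.data.to(torch.float32)

assert all(p.dtype == torch.float32 for p in model.parameters())
```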
import os import torch import sys import peft import model.peft.modeling_peft # noqa import bitsandbytes as bnb # noqa import accelerate # noqa from utils.common_utils import get_model_params_num from transformers import ( # noqa: E402 CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast ) from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from .llama.configuration_llama import LlamaConfig from .llama.modeling_llama import LlamaForCausalLM from .llama.tokenization_llama import LlamaTokenizer from .llama.tokenization_llama_fast import LlamaTokenizerFast from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, StateDictType, ) from utils.common_utils import print_rank_0, is_old_version from tokenizer import build_tokenizer from tokenizer.tokenizer import HFTokenizer from peft.tuners.lora import LoraLayer from model.peft.utils import prepare_model_for_kbit_training from peft import ( # noqa LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model ) from model.peft.tuner import AdaLoraConfig from transformers import BitsAndBytesConfig from packaging import version from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
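For reference, the `single`/`pair` templates that `update_post_processor` in the `LlamaTokenizerFast` snippet feeds to `TemplateProcessing` are assembled by multiplying substrings with boolean flags. A pure-string sketch of that assembly, assuming the snippet's defaults `add_bos_token=True, add_eos_token=False`:

```python
bos, eos = "<s>", "</s>"
add_bos_token, add_eos_token = True, False  # defaults in the snippet above

# Multiplying a string by a bool keeps it (True) or drops it (False).
single = f"{(bos + ':0 ') * add_bos_token}$A:0{(' ' + eos + ':0') * add_eos_token}"
pair = f"{single}{(' ' + bos + ':1') * add_bos_token} $B:1{(' ' + eos + ':1') * add_eos_token}"

print(single)  # <s>:0 $A:0
print(pair)    # <s>:0 $A:0 <s>:1 $B:1
```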
17455
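The two `_build_conversation_input_ids` snippets flatten alternating (user, assistant) turns into a single id list: one `<bos> [INST] ... [/INST] answer <eos>` segment per completed exchange, plus a trailing open prompt. A schematic re-run of that arithmetic with a stand-in `encode` (the ids below are made up, not real Llama ids):

```python
B_INST, E_INST = "[INST]", "[/INST]"
bos_token_id, eos_token_id = 1, 2

def encode(text, add_special_tokens=False):
    # Stand-in tokenizer: one fake id per whitespace-separated token.
    return [ord(tok[0]) for tok in text.split()]

# (is_user, text) pairs, user first and alternating, as the snippet requires.
dialogue = [(True, "hi"), (False, "hello!"), (True, "how are you?")]

dialog_tokens = sum(
    [[bos_token_id]
     + encode(f"{B_INST} {prompt[1].strip()} {E_INST} {answer[1].strip()} ")
     + [eos_token_id]
     for prompt, answer in zip(dialogue[::2], dialogue[1::2])],
    [],
)
dialog_tokens += [bos_token_id] + encode(f"{B_INST} {dialogue[-1][1].strip()} {E_INST}")
print(dialog_tokens)  # [1, 91, 104, 91, 104, 2, 1, 91, 104, 97, 121, 91]
```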
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: 实现from pretrained读tokenizer if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # 适配multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer 
{tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # 适配multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox': auto_config = GPTNeoXConfig auto_model_class = GPTNeoXForCausalLM elif args.model_type == 'llama': auto_config = LlamaConfig
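`find_all_linear_names` in the code above collects the leaf module names of every (possibly quantized) linear layer so they can be handed to a LoRA config as target modules. A toy illustration of the same logic restricted to `torch.nn.Linear`, since the bitsandbytes classes may not be installed:

```python
from torch import nn

model = nn.ModuleDict({
    "encoder": nn.Sequential(nn.Linear(4, 4), nn.ReLU()),
    "lm_head": nn.Linear(4, 2),
})

lora_module_names = set()
for name, module in model.named_modules():
    if isinstance(module, nn.Linear):
        names = name.split(".")
        # keep only the leaf name, e.g. "encoder.0" -> "0"
        lora_module_names.add(names[0] if len(names) == 1 else names[-1])

lora_module_names.discard("lm_head")  # needed for 16-bit, as in the original
print(sorted(lora_module_names))      # ['0']
```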
auto_model_class = LlamaForCausalLM
5
2023-11-02 01:37:01+00:00
24k
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is 
similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": 
proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "Mask", "path": "cryostar/utils/dataio.py", "snippet": "class Mask(torch.nn.Module):\n\n def __init__(self, im_size, rad):\n super(Mask, self).__init__()\n\n mask = torch.lt(torch.linspace(-1, 1, im_size)[None]**2 + torch.linspace(-1, 1, im_size)[:, None]**2, rad**2)\n # float for pl ddp broadcast compatible\n self.register_buffer('mask', mask.float())\n self.num_masked = torch.sum(mask).item()\n\n def forward(self, x):\n return x * self.mask" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. 
* np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. * cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. 
* self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. 
* resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "calc_cor_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_cor_loss(pred_images, gt_images, mask=None):\n if mask is not None:\n pred_images = mask(pred_images)\n gt_images = mask(gt_images)\n pixel_num = mask.num_masked\n else:\n pixel_num = pred_images.shape[-2] * pred_images.shape[-1]\n\n # b, c, h, w -> b, c, num_pix\n pred_images = pred_images.flatten(start_dim=2)\n gt_images = gt_images.flatten(start_dim=2)\n\n # b, c\n dots = (pred_images * gt_images).sum(-1)\n # b, c -> b, c\n err = -dots / (gt_images.std(-1) + 1e-5) / (pred_images.std(-1) + 1e-5)\n # b, c -> b -> 1 value\n err = err.sum(-1).mean() / pixel_num\n return err" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "log_to_current", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, 
center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "bt_save_pdb", "path": "cryostar/utils/pdb_tools.py", "snippet": "def bt_save_pdb(file_path: Union[str, Path], array: Union[AtomArray, AtomArrayStack], **kwargs):\n \"\"\"Save biotite AtomArray or AtomArrayStack to pdb file\n\n Parameters\n ----------\n file_path: save file path\n array: the structure to be saved\n kwargs: additional parameters to be passed, always empty\n\n \"\"\"\n bt_struc.io.save_structure(file_path, array, **kwargs)" }, { "identifier": "EMAN2Grid", "path": "cryostar/gmm/gmm.py", "snippet": "class EMAN2Grid(BaseGrid):\n \"\"\"EMAN2 style grid.\n origin set to -(side_shape // 2) * voxel_size\n\n \"\"\"\n\n def __init__(self, side_shape, voxel_size):\n origin = -side_shape // 2 * voxel_size\n super().__init__(side_shape=side_shape, voxel_size=voxel_size, origin=origin)" }, { "identifier": "batch_projection", "path": "cryostar/gmm/gmm.py", "snippet": "def batch_projection(gauss: Gaussian, rot_mats: torch.Tensor, line_grid: Grid) -> torch.Tensor:\n \"\"\"A quick version of e2gmm projection.\n\n Parameters\n ----------\n gauss: (b/1, num_centers, 3) mus, (b/1, num_centers) sigmas and amplitudes\n rot_mats: (b, 3, 3)\n line_grid: (num_pixels, 3) coords, (nx, ) shape\n\n Returns\n -------\n proj: (b, y, x) projections\n \"\"\"\n\n centers = einops.einsum(rot_mats, gauss.mus, \"b c31 c32, b nc c32 -> b nc c31\")\n\n sigmas = einops.rearrange(gauss.sigmas, 'b nc -> b 1 nc')\n sigmas = 2 * sigmas**2\n\n proj_x = einops.rearrange(line_grid.coords, \"nx -> 1 nx 1\") - einops.rearrange(centers[..., 0], \"b nc -> b 1 nc\")\n proj_x = torch.exp(-proj_x**2 / sigmas)\n\n proj_y = einops.rearrange(line_grid.coords, \"ny -> 1 ny 1\") - einops.rearrange(centers[..., 1], \"b nc -> b 1 nc\")\n proj_y = torch.exp(-proj_y**2 / sigmas)\n\n proj = einops.einsum(gauss.amplitudes, proj_x, proj_y, \"b nc, b nx nc, b ny nc -> b nx ny\")\n proj = einops.rearrange(proj, \"b nx ny -> b ny nx\")\n return proj" }, { "identifier": "Gaussian", "path": "cryostar/gmm/gmm.py", "snippet": "class Gaussian:\n mus: Union[torch.Tensor, np.ndarray]\n sigmas: Union[torch.Tensor, np.ndarray]\n amplitudes: Union[torch.Tensor, np.ndarray]" }, { "identifier": "E3Deformer", "path": "cryostar/gmm/deformer.py", "snippet": "class E3Deformer(torch.nn.Module, DeformerProtocol):\n\n def transform(self, deformation, coords):\n ASSERT_SHAPE(coords, (None, 3))\n ASSERT_SHAPE(deformation, (None, coords.shape[0] * 3))\n\n bsz = deformation.shape[0]\n shift = deformation.reshape(bsz, -1, 3)\n return shift + coords" }, { "identifier": "NMADeformer", "path": "cryostar/gmm/deformer.py", "snippet": "class NMADeformer(torch.nn.Module, DeformerProtocol):\n def __init__(self, modes: torch.FloatTensor) -> None:\n super().__init__()\n modes = einops.rearrange(\n modes, \"(num_coords c3) num_modes -> num_modes num_coords c3\", c3=3\n )\n self.register_buffer(\"modes\", modes)\n self.num_modes = modes.shape[0]\n self.num_coords = modes.shape[1]\n\n def transform(self, deformation, coords):\n ASSERT_SHAPE(coords, (self.num_coords, 3))\n ASSERT_SHAPE(deformation, (None, 6 + self.num_modes))\n\n axis_angle = deformation[..., :3]\n translation = deformation[..., 3:6] * 10\n nma_coeff = deformation[..., 6:]\n rotation_matrix = axis_angle_to_matrix(axis_angle)\n\n nma_deform_e3 = 
einops.einsum(\n nma_coeff, self.modes, \"bsz num_modes, num_modes num_coords c3 -> bsz num_coords c3\"\n )\n rotated_coords = einops.einsum(rotation_matrix, nma_deform_e3 + coords,\n \"bsz c31 c32, bsz num_coords c31 -> bsz num_coords c32\")\n deformed_coords = rotated_coords + einops.rearrange(translation, \"bsz c3 -> bsz 1 c3\")\n return deformed_coords" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "Polymer", "path": "cryostar/utils/polymer.py", "snippet": "class Polymer:\n chain_id: np.ndarray\n res_id: np.ndarray\n res_name: np.ndarray\n coord: np.ndarray\n atom_name: np.ndarray\n element: np.ndarray\n num_electron: np.ndarray\n\n def __init__(self, num):\n self.chain_id = np.empty(num, dtype=\"U4\")\n self.res_id = np.zeros(num, dtype=int)\n self.res_name = np.empty(num, dtype=\"U3\")\n self.coord = np.zeros((num, 3), dtype=np.float32)\n self.atom_name = np.empty(num, dtype=\"U6\")\n self.element = np.empty(num, dtype=\"U2\")\n self.num_electron = np.zeros(num, dtype=int)\n\n def __setitem__(self, index, kwargs):\n assert set(kwargs.keys()).issubset(f.name for f in dataclasses.fields(self))\n for k, v in kwargs.items():\n getattr(self, k)[index] = v\n\n def __getitem__(self, index):\n return {f.name: getattr(self, f.name)[index] for f in dataclasses.fields(self)}\n\n def __len__(self):\n return len(self.chain_id)\n\n @property\n def num_amino_acids(self):\n return np.sum(np.isin(self.atom_name, AA_ATOMS))\n\n @property\n def num_nucleotides(self):\n return np.sum(np.isin(self.atom_name, NT_ATOMS))\n\n @property\n def num_chains(self):\n return len(np.unique(self.chain_id))\n\n @classmethod\n def from_atom_arr(cls, atom_arr):\n assert isinstance(atom_arr, struc.AtomArray)\n\n nt_arr = atom_arr[struc.filter_nucleotides(atom_arr)]\n aa_arr = atom_arr[struc.filter_amino_acids(atom_arr)]\n\n num = 0\n if len(aa_arr) > 0:\n num += struc.get_residue_count(aa_arr)\n if len(nt_arr) > 0:\n for res in struc.residue_iter(nt_arr):\n valid_atoms = set(res.atom_name).intersection(NT_ATOMS)\n if len(valid_atoms) <= 0:\n raise UserWarning(f\"Nucleotides doesn't contain {' or '.join(NT_ATOMS)}.\")\n else:\n num += len(valid_atoms)\n meta = cls(num)\n\n def _update_res(tmp_res, kind=\"aa\"):\n nonlocal pos\n\n if kind == \"aa\":\n using_atom_names = AA_ATOMS\n filtered_res = tmp_res[struc.filter_peptide_backbone(tmp_res)]\n elif kind == \"nt\":\n using_atom_names = NT_ATOMS\n filtered_res = tmp_res\n else:\n raise NotImplemented\n\n valid_atom_names = set(tmp_res.atom_name).intersection(using_atom_names)\n\n for select_atom_name in valid_atom_names:\n meta[pos] = {\n \"chain_id\": tmp_res.chain_id[0],\n \"res_id\": tmp_res.res_id[0],\n \"res_name\": tmp_res.res_name[0],\n \"coord\": filtered_res[filtered_res.atom_name == select_atom_name].coord,\n \"atom_name\": select_atom_name,\n \"element\": filtered_res[filtered_res.atom_name == 
select_atom_name].element[0],\n \"num_electron\": get_num_electrons(tmp_res) // len(valid_atom_names)\n }\n pos += 1\n\n def _update(tmp_arr, kind=\"aa\"):\n nonlocal pos\n for chain in struc.chain_iter(tmp_arr):\n for tmp_res in struc.residue_iter(chain):\n _update_res(tmp_res, kind)\n\n pos = 0\n\n if len(aa_arr) > 0:\n _update(aa_arr, kind=\"aa\")\n if len(nt_arr) > 0:\n _update(nt_arr, kind=\"nt\")\n\n assert pos == num\n return meta\n\n @classmethod\n def from_pdb(cls, file_path):\n atom_arr = bt_read_pdb(file_path)\n if atom_arr.stack_depth() > 1:\n print(\"PDB file contains more than 1 models, select the 1st model\")\n atom_arr = atom_arr[0]\n return Polymer.from_atom_arr(atom_arr)\n\n def to_atom_arr(self):\n num = len(self)\n atom_arr = struc.AtomArray(num)\n atom_arr.coord = self.coord\n\n for f in dataclasses.fields(self):\n if f.name != \"coord\" and f.name in atom_arr.get_annotation_categories():\n atom_arr.set_annotation(f.name, getattr(self, f.name))\n # atom_arr.atom_name[atom_arr.atom_name == \"R\"] = \"CB\"\n return atom_arr" }, { "identifier": "NT_ATOMS", "path": "cryostar/utils/polymer.py", "snippet": "NT_ATOMS = (\"C1'\", )" }, { "identifier": "AA_ATOMS", "path": "cryostar/utils/polymer.py", "snippet": "AA_ATOMS = (\"CA\", )" }, { "identifier": "find_quaint_cutoff_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_quaint_cutoff_pairs(coord_arr,\n chain_id_arr,\n res_id_arr,\n intra_chain_cutoff=12.,\n inter_chain_cutoff=12.,\n intra_chain_res_bound=None):\n sel_indices = []\n dist_map = distance.cdist(coord_arr, coord_arr, metric='euclidean')\n # 1. intra chain\n sel_mask = dist_map <= intra_chain_cutoff\n sel_mask = np.triu(sel_mask, k=1)\n # get indices of valid pairs\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n indices_in_pdb = indices_in_pdb[chain_id_arr[indices_in_pdb[:, 0]] == chain_id_arr[indices_in_pdb[:, 1]]]\n # filter by res_id\n if intra_chain_res_bound is not None:\n assert res_id_arr is not None\n res_ids = res_id_arr[indices_in_pdb]\n res_id_dist = np.abs(np.diff(res_ids, axis=1)).flatten()\n indices_in_pdb = indices_in_pdb[res_id_dist <= intra_chain_res_bound]\n\n sel_indices.append(indices_in_pdb)\n\n # 2. 
inter chain\n if inter_chain_cutoff is not None:\n sel_mask = dist_map <= inter_chain_cutoff\n sel_mask = np.triu(sel_mask, k=1)\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n indices_in_pdb = indices_in_pdb[chain_id_arr[indices_in_pdb[:, 0]] != chain_id_arr[indices_in_pdb[:, 1]]]\n sel_indices.append(indices_in_pdb)\n\n sel_indices = np.vstack(sel_indices)\n return sel_indices" }, { "identifier": "find_range_cutoff_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_range_cutoff_pairs(coord_arr, min_cutoff=4., max_cutoff=10.):\n dist_map = distance.cdist(coord_arr, coord_arr, metric='euclidean')\n sel_mask = (dist_map <= max_cutoff) & (dist_map >= min_cutoff)\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n return indices_in_pdb" }, { "identifier": "find_continuous_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_continuous_pairs(chain_id_arr, res_id_arr, atom_name_arr):\n pairs = []\n\n # res_id in different chains are duplicated, so loop on chains\n u_chain_id = np.unique(chain_id_arr)\n\n for c_id in u_chain_id:\n tmp_mask = chain_id_arr == c_id\n tmp_indices_in_pdb = np.nonzero(tmp_mask)[0]\n\n tmp_res_id_arr = res_id_arr[tmp_mask]\n tmp_atom_name_arr = atom_name_arr[tmp_mask]\n\n # check is aa or nt\n tmp_atom_name_set = set(tmp_atom_name_arr)\n\n if len(tmp_atom_name_set.intersection(AA_ATOMS)) > len(tmp_atom_name_set.intersection(NT_ATOMS)):\n in_res_atom_names = AA_ATOMS\n elif len(tmp_atom_name_set.intersection(AA_ATOMS)) < len(tmp_atom_name_set.intersection(NT_ATOMS)):\n in_res_atom_names = NT_ATOMS\n else:\n raise NotImplemented(\"Cannot determine chain is amino acid or nucleotide.\")\n\n # find pairs\n if len(in_res_atom_names) == 1:\n u_res_id, indices_in_chain = np.unique(tmp_res_id_arr, return_index=True)\n if len(u_res_id) != np.sum(tmp_mask):\n raise ValueError(f\"Found duplicate residue id in single chain {c_id}.\")\n\n indices_in_chain_pair = np.column_stack((indices_in_chain[:-1], indices_in_chain[1:]))\n\n # must be adjacent on residue id\n valid_mask = np.abs(np.diff(u_res_id[indices_in_chain_pair], axis=1)) == 1\n\n indices_in_chain_pair = indices_in_chain_pair[valid_mask.flatten()]\n\n indices_in_pdb_pair = tmp_indices_in_pdb[indices_in_chain_pair]\n elif len(in_res_atom_names) > 1:\n\n def _cmp(a, b):\n # res_id compare\n if a[0] != b[0]:\n return a[0] - b[0]\n else:\n # atom_name in the same order of AA_ATOMS or NT_ATOMS\n return in_res_atom_names.index(a[1]) - in_res_atom_names.index(b[1])\n\n cache = list(zip(tmp_res_id_arr, tmp_atom_name_arr, tmp_indices_in_pdb))\n sorted_cache = list(sorted(cache, key=cmp_to_key(_cmp)))\n\n sorted_indices_in_pdb = [item[2] for item in sorted_cache]\n sorted_res_id = [item[0] for item in sorted_cache]\n\n indices_in_pdb_pair = np.column_stack((sorted_indices_in_pdb[:-1], sorted_indices_in_pdb[1:]))\n\n valid_mask = np.abs(np.diff(np.column_stack((sorted_res_id[:-1], sorted_res_id[1:])), axis=1)) <= 1\n\n indices_in_pdb_pair = indices_in_pdb_pair[valid_mask.flatten()]\n else:\n raise NotImplemented(\"No enough atoms to construct continuous pairs.\")\n\n pairs.append(indices_in_pdb_pair)\n\n pairs = np.vstack(pairs)\n return pairs" }, { "identifier": "calc_dist_by_pair_indices", "path": "cryostar/utils/dist_loss.py", "snippet": "def calc_dist_by_pair_indices(coord_arr, pair_indices):\n coord_pair_arr = coord_arr[pair_indices] # num_pair, 2, 3\n dist = 
np.linalg.norm(np.diff(coord_pair_arr, axis=1), ord=2, axis=-1)\n return dist.flatten()" }, { "identifier": "remove_duplicate_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def remove_duplicate_pairs(pairs_a, pairs_b, remove_flip=True):\n \"\"\"Remove pair b from a\"\"\"\n s = max(pairs_a.max(), pairs_b.max()) + 1\n # trick for fast comparison\n mask = np.zeros((s, s), dtype=bool)\n\n np.put(mask, np.ravel_multi_index(pairs_a.T, mask.shape), True)\n np.put(mask, np.ravel_multi_index(pairs_b.T, mask.shape), False)\n if remove_flip:\n np.put(mask, np.ravel_multi_index(np.flip(pairs_b, 1).T, mask.shape), False)\n return np.column_stack(np.nonzero(mask))" }, { "identifier": "filter_same_chain_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def filter_same_chain_pairs(pair_ids, chain_id_arr):\n chain_ids = chain_id_arr[pair_ids]\n\n same_chain_mask = chain_ids[:, 0] == chain_ids[:, 1]\n\n pair_mask = []\n\n for u in np.unique(chain_ids):\n tmp = np.logical_and(chain_ids[:, 0] == u, same_chain_mask)\n if np.any(tmp):\n pair_mask.append(tmp)\n\n if len(pair_mask) > 0:\n return np.row_stack(pair_mask)\n else:\n return None" }, { "identifier": "DistLoss", "path": "cryostar/utils/dist_loss.py", "snippet": "class DistLoss(nn.Module):\n\n def __init__(self, pair_ids, gt_dists, reduction=\"mean\"):\n super().__init__()\n self.reduction = reduction\n\n self.register_buffer(\"pair_ids\", torch.from_numpy(pair_ids).long())\n self.register_buffer(\"gt_dists\", torch.from_numpy(gt_dists).float())\n\n # edge-wise weights\n # raw_weights = torch.ones(len(pair_ids), dtype=torch.float) * 3.\n #\n # self.register_parameter(\"raw_weights\", nn.Parameter(raw_weights))\n\n # RBF residue-wise weights\n # u_left_ids = np.unique(pair_ids[:, 0])\n #\n # std_idx = np.zeros(max(u_left_ids) + 1, dtype=int)\n # sparse_idx = np.arange(len(u_left_ids))\n #\n # std_idx[u_left_ids] = sparse_idx\n #\n # select_index = std_idx[pair_ids[:, 0]]\n\n # weight = 0.9 at dist_rescale\n # sigmas = torch.ones(max(u_left_ids) + 1, dtype=torch.float) * np.sqrt(-0.5 / np.log(0.9))\n #\n # self.dist_rescale = dist_rescale\n # self.register_buffer(\"select_index\", torch.from_numpy(select_index).long())\n # self.register_parameter(\"sigmas\", nn.Parameter(sigmas))\n\n # def get_weights(self):\n # return torch.sigmoid(self.raw_weights)\n # edge_sigmas = torch.index_select(self.sigmas, dim=0, index=self.select_index)\n # weights = torch.exp(-torch.pow(self.gt_dists / self.dist_rescale, 2) / (2 * torch.pow(edge_sigmas, 2)))\n # return weights\n\n def calc_pair_dists(self, batch_struc):\n batch_dist = batch_struc[:, self.pair_ids] # bsz, num_pair, 2, 3\n batch_dist = LA.vector_norm(torch.diff(batch_dist, dim=-2), axis=-1).squeeze(-1) # bsz, num_pair\n return batch_dist\n\n def forward(self, batch_struc):\n batch_dist = self.calc_pair_dists(batch_struc)\n # mse = torch.pow(batch_dist - self.gt_dists.unsqueeze(0), 2) * self.get_weights().unsqueeze(0)\n mse = torch.pow(batch_dist - self.gt_dists.unsqueeze(0), 2)\n if self.reduction is None:\n return mse\n elif self.reduction == \"mean\":\n return torch.mean(mse)\n else:\n raise NotImplementedError" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": 
"cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "run_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def run_pca(z: np.ndarray) -> Tuple[np.ndarray, PCA]:\n pca = PCA(z.shape[1])\n pca.fit(z)\n # print(\"Explained variance ratio:\")\n # print(pca.explained_variance_ratio_)\n pc = pca.transform(z)\n return pc, pca" }, { "identifier": "get_pc_traj", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_pc_traj(\n pca: PCA,\n zdim: int,\n numpoints: int,\n dim: int,\n start: Optional[float] = 5,\n end: Optional[float] = 95,\n percentiles: Optional[np.ndarray] = None,\n) -> npt.NDArray[np.float32]:\n \"\"\"\n Create trajectory along specified principal component\n\n Inputs:\n pca: sklearn PCA object from run_pca\n zdim (int)\n numpoints (int): number of points between @start and @end\n dim (int): PC dimension for the trajectory (1-based index)\n start (float): Value of PC{dim} to start trajectory\n end (float): Value of PC{dim} to stop trajectory\n percentiles (np.array or None): Define percentile array instead of np.linspace(start,stop,numpoints)\n\n Returns:\n np.array (numpoints x zdim) of z values along PC\n \"\"\"\n if percentiles is not None:\n assert len(percentiles) == numpoints\n traj_pca = np.zeros((numpoints, zdim))\n if percentiles is not None:\n traj_pca[:, dim - 1] = percentiles\n else:\n assert start is not None\n assert end is not None\n traj_pca[:, dim - 1] = np.linspace(start, end, numpoints)\n ztraj_pca = pca.inverse_transform(traj_pca)\n return ztraj_pca" }, { "identifier": "run_umap", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def run_umap(z: np.ndarray, **kwargs) -> Tuple[np.ndarray, umap.UMAP]:\n reducer = umap.UMAP(**kwargs)\n z_embedded = reducer.fit_transform(z)\n return z_embedded, reducer" }, { "identifier": "plot_z_dist", "path": "cryostar/utils/vis_utils.py", "snippet": "def plot_z_dist(z, extra_cluster=None, save_path=None):\n if z.shape[-1] == 1:\n fig = sns.displot(x=z[:, 0])\n fig.set_xlabels(\"z values\")\n if save_path is not None:\n fig.savefig(save_path)\n elif z.shape[-1] == 2:\n sns.set()\n fig = sns.jointplot(x=z[:, 0], y=z[:, 1], kind=\"kde\", fill=True)\n ax = fig.figure.axes\n if extra_cluster is not None:\n ax[0].scatter(extra_cluster[:, 0], extra_cluster[:, 1], marker='.', color='tab:orange')\n if save_path is not None:\n fig.savefig(save_path)\n else:\n raise ValueError(f\"input z with shape 
{z.shape}\")" }, { "identifier": "save_tensor_image", "path": "cryostar/utils/vis_utils.py", "snippet": "def save_tensor_image(tensors, save_path, mask=None):\n # normalize\n max_val = torch.max(tensors.flatten(start_dim=1), 1)[0][:, None, None, None]\n min_val = torch.min(tensors.flatten(start_dim=1), 1)[0][:, None, None, None]\n tensors = (tensors - min_val) / (max_val - min_val)\n\n show_img = ToPILImage()(make_grid(tensors, nrow=5))\n if mask is None:\n show_img.save(save_path)\n else:\n show_img = np.copy(np.asarray(show_img))\n # show_img = cv2.cvtColor(show_img, cv2.COLOR_GRAY2RGB)\n if mask.ndim == 2:\n mask = mask[None]\n mask = ToPILImage()(make_grid(mask.expand(tensors.shape[0], -1, -1, -1), nrow=5))\n mask = np.invert(np.asarray(mask).astype(bool))[..., 0]\n color_mask = np.array([[0, 0, 0], [31, 119, 180]], dtype=np.uint8)\n color_mask = color_mask[mask.astype(int)]\n show_img[mask] = cv2.addWeighted(show_img[mask], 0.5, color_mask[mask], 0.5, 0)\n show_img = Image.fromarray(show_img)\n show_img.save(save_path)" }, { "identifier": "merge_step_outputs", "path": "cryostar/utils/pl_utils.py", "snippet": "def merge_step_outputs(outputs):\n ks = outputs[0].keys()\n res = {}\n for k in ks:\n res[k] = torch.concat([out[k] for out in outputs], dim=0)\n return res" }, { "identifier": "squeeze_dict_outputs_1st_dim", "path": "cryostar/utils/pl_utils.py", "snippet": "def squeeze_dict_outputs_1st_dim(outputs):\n res = {}\n for k in outputs.keys():\n res[k] = outputs[k].flatten(start_dim=0, end_dim=1)\n return res" }, { "identifier": "filter_outputs_by_indices", "path": "cryostar/utils/pl_utils.py", "snippet": "def filter_outputs_by_indices(outputs, indices):\n res = {}\n for k in outputs.keys():\n res[k] = outputs[k][indices]\n return res" }, { "identifier": "get_1st_unique_indices", "path": "cryostar/utils/pl_utils.py", "snippet": "def get_1st_unique_indices(t):\n _, idx, counts = torch.unique(t, dim=None, sorted=True, return_inverse=True, return_counts=True)\n # ind_sorted: the index corresponding to same unique value will be grouped by these indices\n _, ind_sorted = torch.sort(idx, stable=True)\n cum_sum = counts.cumsum(0)\n cum_sum = torch.cat((cum_sum.new_tensor([\n 0,\n ]), cum_sum[:-1]))\n first_idx = ind_sorted[cum_sum]\n return first_idx" } ]
import os.path as osp import warnings import collections import einops import numpy as np import biotite.structure as struc import torch import lightning.pytorch as pl from pathlib import Path from copy import deepcopy from torch import nn from torch import optim from torch.utils.data import DataLoader from torchinfo import summary from lightning.fabric.utilities.warnings import PossibleUserWarning from lightning.pytorch.utilities import rank_zero_only from lightning.pytorch.strategies import DDPStrategy from mmengine import mkdir_or_exist from cryostar.utils.transforms import SpatialGridTranslate from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.losses import calc_cor_loss, calc_kl_loss from cryostar.utils.misc import log_to_current, \ pl_init_exp, pretty_dict, set_seed, warmup from cryostar.utils.pdb_tools import bt_save_pdb from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian from cryostar.gmm.deformer import E3Deformer, NMADeformer from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs, find_continuous_pairs, calc_dist_by_pair_indices, remove_duplicate_pairs, filter_same_chain_pairs, DistLoss) from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \ filter_outputs_by_indices, get_1st_unique_indices from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
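Of the utilities imported above, `remove_duplicate_pairs` (whose source appears in the context list) deduplicates index pairs through a dense boolean matrix rather than set operations. A short self-contained sketch; the toy pair values are illustrative only:

import numpy as np

def remove_duplicate_pairs(pairs_a, pairs_b, remove_flip=True):
    # identical trick to the context snippet: mark pairs_a in a dense
    # boolean adjacency matrix, clear pairs_b (and, optionally, their
    # flipped orientation), then read the surviving pairs back out
    s = max(pairs_a.max(), pairs_b.max()) + 1
    mask = np.zeros((s, s), dtype=bool)
    np.put(mask, np.ravel_multi_index(pairs_a.T, mask.shape), True)
    np.put(mask, np.ravel_multi_index(pairs_b.T, mask.shape), False)
    if remove_flip:
        np.put(mask, np.ravel_multi_index(np.flip(pairs_b, 1).T, mask.shape), False)
    return np.column_stack(np.nonzero(mask))

pairs_a = np.array([[0, 1], [1, 2], [2, 3]])
pairs_b = np.array([[2, 1]])                     # flipped duplicate of (1, 2)
print(remove_duplicate_pairs(pairs_a, pairs_b))  # [[0 1] [2 3]]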
16442
if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = 
state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d images = fourier_to_primal_2d(f_images).real return images def get_batch_pose(self, batch): rot_mats = batch["rotmat"] # yx order trans_mats = torch.concat((batch["shiftY"].unsqueeze(1), batch["shiftX"].unsqueeze(1)), dim=1) trans_mats /= self.apix return rot_mats, trans_mats def _shared_forward(self, images, idxes, rots): # predict structure pred_deformation, mu, log_var = self.model(prepare_images(images, self.cfg.model.input_space), idxes, rots) return pred_deformation, mu, log_var def _shared_projection(self, pred_struc, rot_mats): pred_images = batch_projection(
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None: self.deformer = E3Deformer() else: self.deformer = NMADeformer(nma_modes) # loss or regularization's preparation # dist loss connect_pairs = find_continuous_pairs(meta.chain_id, meta.res_id, meta.atom_name) if cfg.extra_input_data_attr.use_domain: log_to_current("use domain instead of chain!") domain_id = np.load(cfg.extra_input_data_attr.domain_path) cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, domain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) else: # deal with RNA/DNA if np.sum(np.isin(meta.atom_name, NT_ATOMS)): # aa tmp_mask = np.isin(meta.atom_name, AA_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] aa_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) aa_cutoff_pairs = indices_in_pdb[aa_cutoff_pairs] log_to_current(f"{len(aa_cutoff_pairs)} AA pairs") # nt tmp_mask = np.isin(meta.atom_name, NT_ATOMS) indices_in_pdb = np.nonzero(tmp_mask)[0] nt_cutoff_pairs = find_quaint_cutoff_pairs(meta.coord[tmp_mask], meta.chain_id[tmp_mask], meta.res_id[tmp_mask], cfg.loss.nt_intra_chain_cutoff, cfg.loss.nt_inter_chain_cutoff, cfg.loss.nt_intra_chain_res_bound) nt_cutoff_pairs = indices_in_pdb[nt_cutoff_pairs] log_to_current(f"{len(nt_cutoff_pairs)} NT pairs") cutoff_pairs = np.vstack((aa_cutoff_pairs, nt_cutoff_pairs)) else: cutoff_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, cfg.loss.inter_chain_cutoff, cfg.loss.intra_chain_res_bound) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, connect_pairs) if cfg.loss.sse_weight != 0.0: log_to_current("use pseduo `sse` by building spatial/sequential edges") sse_pairs = find_quaint_cutoff_pairs(meta.coord, meta.chain_id, meta.res_id, cfg.loss.intra_chain_cutoff, 0, 20) cutoff_pairs = remove_duplicate_pairs(cutoff_pairs, sse_pairs) clash_pairs = find_range_cutoff_pairs(meta.coord, cfg.loss.clash_min_cutoff) clash_pairs = remove_duplicate_pairs(clash_pairs, connect_pairs) if len(connect_pairs) > 0: self.register_buffer("connect_pairs", 
torch.from_numpy(connect_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, connect_pairs) self.register_buffer("connect_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(connect_pairs)} connect_pairs") else: log_to_current("connect_pairs is empty") if cfg.loss.sse_weight != 0.0: self.register_buffer("sse_pairs", torch.from_numpy(sse_pairs).long()) dists = calc_dist_by_pair_indices(meta.coord, sse_pairs) self.register_buffer("sse_dists", torch.from_numpy(dists).float()) log_to_current(f"found {len(sse_pairs)} sse_pairs") if len(cutoff_pairs) > 0: dists = calc_dist_by_pair_indices(meta.coord, cutoff_pairs) log_to_current(f"found {len(cutoff_pairs)} cutoff_pairs") self.dist_loss_fn = DistLoss(cutoff_pairs, dists, reduction=None) # for chain-wise dropout cutoff_chain_mask = filter_same_chain_pairs(cutoff_pairs, meta.chain_id) self.register_buffer("cutoff_chain_mask", torch.from_numpy(cutoff_chain_mask)) else: log_to_current("cutoff_pairs is empty") if len(clash_pairs) > 0: self.register_buffer("clash_pairs", torch.from_numpy(clash_pairs).long()) log_to_current(f"found {len(clash_pairs)} clash_pairs") else: log_to_current("clash_pairs is empty") # low-pass filtering if hasattr(cfg.data_process, "low_pass_bandwidth"): log_to_current(f"Use low-pass filtering w/ {cfg.data_process.low_pass_bandwidth} A") lp_mask2d = low_pass_mask2d(cfg.data_process.down_side_shape, cfg.data_process.down_apix, cfg.data_process.low_pass_bandwidth) self.register_buffer("lp_mask2d", torch.from_numpy(lp_mask2d).float()) else: self.lp_mask2d = None # self.mask = Mask(cfg.data_process.down_side_shape, rad=cfg.loss.mask_rad_for_image_loss) # for projection grid = EMAN2Grid(side_shape=cfg.data_process.down_side_shape, voxel_size=cfg.data_process.down_apix) self.grid = grid ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) # translate image helper self.translator = SpatialGridTranslate(D=cfg.data_process.down_side_shape, device=self.device) self.apix = self.cfg.data_process.down_apix # cache self.validation_step_outputs = [] self.stored_metrics = {} self.history_saved_dirs = [] if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") self._load_ckpt(self.cfg.extra_input_data_attr.ckpt_path) def _save_ckpt(self, ckpt_path): torch.save( { "model": self.model.state_dict(), "gmm_sigmas": self.gmm_sigmas.data, "gmm_amps": self.gmm_amps.data }, ckpt_path) def _load_ckpt(self, ckpt_path): state_dict = torch.load(ckpt_path, map_location=self.device) self.model.load_state_dict(state_dict["model"]) if self.cfg.gmm.tunable: self.gmm_sigmas.data = state_dict["gmm_sigmas"] self.gmm_amps.data = state_dict["gmm_amps"] def _get_save_dir(self): save_dir = osp.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def low_pass_images(self, images): f_images = primal_to_fourier_2d(images) f_images = f_images * self.lp_mask2d images = fourier_to_primal_2d(f_images).real return images def get_batch_pose(self, batch): rot_mats = batch["rotmat"] # yx order trans_mats = torch.concat((batch["shiftY"].unsqueeze(1), 
batch["shiftX"].unsqueeze(1)), dim=1) trans_mats /= self.apix return rot_mats, trans_mats def _shared_forward(self, images, idxes, rots): # predict structure pred_deformation, mu, log_var = self.model(prepare_images(images, self.cfg.model.input_space), idxes, rots) return pred_deformation, mu, log_var def _shared_projection(self, pred_struc, rot_mats): pred_images = batch_projection(
gauss=Gaussian(
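This `next_line` continues the `batch_projection(` call at which both `cropped_code` and `all_code` stop. The distance-loss side of the same training step reduces to the `DistLoss.calc_pair_dists` pattern from the context list; a self-contained sketch with toy shapes (batch and pair sizes chosen here for illustration, using `dim=` rather than the snippet's `axis=` alias):

import torch
import torch.linalg as LA

# toy batch: 2 structures, 4 atoms each, 3 atom pairs
batch_struc = torch.randn(2, 4, 3)
pair_ids = torch.tensor([[0, 1], [1, 2], [0, 3]])

coord_pairs = batch_struc[:, pair_ids]               # (bsz, num_pair, 2, 3)
diffs = torch.diff(coord_pairs, dim=-2).squeeze(-2)  # (bsz, num_pair, 3)
batch_dist = LA.vector_norm(diffs, dim=-1)           # (bsz, num_pair)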
12
2023-11-06 07:15:26+00:00
24k
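The `low_pass_images` method in the record above reduces to masking the centered 2-D spectrum of each particle image. A sketch using torch.fft directly; cryostar's `primal_to_fourier_2d`/`fourier_to_primal_2d` and `low_pass_mask2d` are not shown in this record, so the fftshift convention and the toy radial cutoff below are assumptions, not the library's actual behavior:

import torch

def low_pass_images(images, lp_mask2d):
    # assumed convention: centered spectrum, so a 2-D mask indexed like
    # the image can be multiplied in directly; cryostar's own FFT helpers
    # may shift differently
    f_images = torch.fft.fftshift(torch.fft.fft2(images), dim=(-2, -1))
    f_images = f_images * lp_mask2d
    return torch.fft.ifft2(torch.fft.ifftshift(f_images, dim=(-2, -1))).real

# toy radial mask standing in for low_pass_mask2d (cutoff is arbitrary)
D = 64
freqs = torch.fft.fftshift(torch.fft.fftfreq(D))
r = torch.sqrt(freqs[None, :] ** 2 + freqs[:, None] ** 2)
lp_mask2d = (r <= 0.1).float()

filtered = low_pass_images(torch.randn(8, 1, D, D), lp_mask2d)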
KAIST-AILab/palr
train.py
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(BC, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim \n self.stacksize = stacksize\n \n self.policy_optimizer = optim.Adam(policy.parameters(), lr=lr)\n \n self.num_eval_iteration = 50\n self.envname = envname\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path \n \n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.random_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:] \n prev_expert_action_valid = batch_valid['actions'][:, :-self.action_dim] # For debugging\n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_expert_action_valid = torch.tensor(prev_expert_action_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device) \n\n neg_likelihood = -self.policy.log_prob(obs, actions).mean()\n train_loss = neg_likelihood\n \n self.policy_optimizer.zero_grad()\n train_loss.backward()\n self.policy_optimizer.step()\n\n if (num+1) % eval_freq == 0:\n policy_action = self.policy(obs).sample()\n policy_action_valid = self.policy(obs_valid).sample()\n prev_expert_action = batch['actions'][:, :-self.action_dim] \n prev_expert_action = torch.tensor(prev_expert_action, dtype=torch.float32, device=self.device) \n \n # Train data HSCIC (for debugging) \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n p_std = (policy_action - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n 
p_std = p_std.to(torch.float32)\n else:\n Y_std = prev_expert_action\n Z_std = actions\n p_std = policy_action\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)\n \n policy_embedding_valid = self.policy.forward_embedding(obs_valid)\n if self.standardize:\n Y_std = (prev_expert_action_valid - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions_valid - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action_valid\n Z_std = actions_valid\n p_std = policy_action\n \n valid_hscic_estimate = estimate_hscic(X=policy_embedding_valid, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_expert_action_valid, Z=actions_valid, ridge_lambda=1e-5)\n\n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n valid_loss = valid_neg_likelihood\n\n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: train_policy_loss={train_loss.item():.2f}, val_policy_loss={valid_loss.item():.2f}, eval_ret={eval_ret_mean:.2f}+-{eval_ret_std:.2f} ({obs_valid.shape[0]})',)\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(),\n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "RAP", "path": "imitation/rap.py", "snippet": "class RAP(nn.Module):\n # Implementation of Residual Action Prediction (ECCV 2022)\n # - https://arxiv.org/pdf/2207.09705.pdf\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, embedding_dim=1, stacksize=1, standardize=False\n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(RAP, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n \n self.device = device\n \n self.m_embedding_optimizer = optim.Adam(policy.history_embedding_params, lr=lr)\n self.h_embedding_optimizer = optim.Adam(policy.single_embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.residual_optimizer = optim.Adam(policy.residual_params, lr=lr)\n\n self.num_eval_iteration = 50 \n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize\n \n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n max_score = -100000. \n min_loss = 100000. 
\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device) \n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n self.m_embedding_optimizer.zero_grad()\n self.residual_optimizer.zero_grad() \n \n # m : history embedding, h : single observation embedding\n m, _ = self.policy.forward_embedding(obs) \n action_residuals = actions - prev_actions\n action_residual_pred = self.policy.forward_residual_from_m(m)\n \n train_residual_loss = torch.mean((action_residual_pred - action_residuals) ** 2)\n train_residual_loss.backward()\n \n self.m_embedding_optimizer.step()\n self.residual_optimizer.step() \n \n self.policy_optimizer.zero_grad() \n self.h_embedding_optimizer.zero_grad() \n \n m, h = self.policy.forward_embedding(obs)\n \n # we follow the original implementation that stop-gradient layer on m ; \n # see `forward_policy_from_embedding` method for detail. 
(m.detach() in input)\n train_neg_likelihood = -self.policy.log_prob_policy_from_m_h(m, h, actions).mean()\n train_neg_likelihood.backward()\n \n self.policy_optimizer.step()\n self.h_embedding_optimizer.step()\n \n if (num+1) % eval_freq == 0: \n valid_m, valid_h = self.policy.forward_embedding(obs_valid) \n valid_action_residuals = actions_valid - prev_actions_valid\n valid_action_residual_pred = self.policy.forward_residual_from_m(valid_m)\n \n valid_policy_neg_likelihood = -self.policy.log_prob_policy_from_m_h(valid_m, valid_h, actions_valid).mean()\n valid_residual_loss = torch.mean((valid_action_residual_pred - valid_action_residuals) ** 2) \n \n valid_loss = valid_policy_neg_likelihood + valid_residual_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n \n train_mh = torch.cat([m,h], dim=-1)\n valid_mh = torch.cat([valid_m, valid_h], dim=-1)\n \n hscic_estimate = estimate_hscic(X=train_mh, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=valid_mh, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5) \n train_hscic_m_a_given_aprev = estimate_hscic(X=m, Y=actions, Z=prev_actions, ridge_lambda=1e-5)\n valid_hscic_m_a_given_aprev = estimate_hscic(X=valid_m, Y=actions_valid, Z=prev_actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n train_loss = train_neg_likelihood + train_residual_loss\n \n print(f'** iter{num+1}: train_loss={train_loss.item()}, nll={train_neg_likelihood}, residual_loss={train_residual_loss}, eval_ret={eval_ret_mean}+-{eval_ret_std}')\n print(f' valid_loss={valid_loss.item()}, valid_nll={valid_policy_neg_likelihood}, valid_residual_loss={valid_residual_loss}')\n \n print(f'** HSCIC(mh, a_prev | a_current) : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n print(f'** HSCIC(m, a_current | a_prev) : (train){train_hscic_m_a_given_aprev:.6f} (valid){valid_hscic_m_a_given_aprev:.6f} ')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.item(),\n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': train_neg_likelihood.item(),\n 'valid_neg_likelihood': valid_policy_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'train_residual_loss': train_residual_loss,\n 'valid_residual_loss': valid_residual_loss,\n 'train_mean_hscic(m,target|prev)': train_hscic_m_a_given_aprev,\n 'valid_mean_hscic(m,target|prev)': valid_hscic_m_a_given_aprev,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score ')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in 
range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n # obs = obs[:true_obs_dim]\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()[0]\n next_obs, rew, done, env_info = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "FCA", "path": "imitation/fca.py", "snippet": "class FCA(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n embedding_dim=1, entropy_hidden_size=300, entropy_lr=1e-4, reg_coef=1e-5, info_bottleneck_loss_coef=0.001, \n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(FCA, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid \n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize \n\n # Additional Network for Conditional Entropy (FCA)\n self.entropy_input_size = embedding_dim + action_dim\n self.entropy_hidden_size = entropy_hidden_size\n self.entropy_net = nn.Sequential(\n nn.Linear(self.entropy_input_size, self.entropy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(self.entropy_hidden_size, action_dim, device=self.device)\n )\n \n # FCA Hyperparameters\n self.entropy_coef = reg_coef \n self.info_bottleneck_loss_coef = info_bottleneck_loss_coef \n \n self.embedding_optimizer = optim.Adam(policy.embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.entropy_optimizer = optim.Adam(self.entropy_net.parameters(), lr=entropy_lr)\n\n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000, inner_steps=1):\n \n max_score = -100000. \n min_loss = 100000. 
\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device) \n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n # conditional entropy input : H(a_{t-1}| a_{t}, varphi_t)\n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1) \n \n self.policy_optimizer.zero_grad()\n self.embedding_optimizer.zero_grad()\n self.entropy_optimizer.zero_grad()\n\n if self.entropy_coef > 0.:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n\n # prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n pred_prev_actions = self.entropy_net(expert_action_and_h) \n entropy_loss = torch.mean((pred_prev_actions - prev_actions) ** 2) \n\n train_loss = neg_likelihood \\\n - self.entropy_coef * entropy_loss \\\n + self.info_bottleneck_loss_coef * info_bottleneck_loss\n \n train_loss.backward() # backprop embedding\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n\n # conditional entropy training\n for _ in range(inner_steps):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device) \n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n \n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1) \n\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device) \n pred_prev_actions = self.entropy_net(expert_action_and_h.detach())\n\n entropy_loss = torch.mean((pred_prev_actions - prev_actions) ** 2)\n \n self.entropy_optimizer.zero_grad()\n entropy_loss.backward()\n self.entropy_optimizer.step()\n\n else:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n \n train_loss = neg_likelihood + self.info_bottleneck_loss_coef * info_bottleneck_loss \n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step() \n \n\n if (num+1) % eval_freq == 0: \n h_valid = self.policy.forward_embedding(obs_valid)\n valid_info_bottleneck_loss = 0.5 * (h_valid ** 2).sum()\n \n if self.entropy_coef > 0:\n expert_action_and_h_valid = torch.cat([actions_valid, h_valid], dim=-1) \n pred_prev_actions_valid = self.entropy_net(expert_action_and_h_valid)\n \n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim]\n prev_actions_valid = 
torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device)\n \n valid_entropy_loss = torch.mean((pred_prev_actions_valid - prev_actions_valid) ** 2)\n else:\n valid_entropy_loss = 0.\n \n valid_neg_likelihood = - self.policy.log_prob(obs_valid, actions_valid).mean()\n \n valid_loss = valid_neg_likelihood \\\n - self.entropy_coef * valid_entropy_loss \\\n + self.info_bottleneck_loss_coef * valid_info_bottleneck_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n h_train = self.policy.forward_embedding(obs)\n \n hscic_estimate = estimate_hscic(X=h_train, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=h_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: entropy_loss={entropy_loss}, train_loss={train_loss.item()}, eval_ret={eval_ret_mean}+-{eval_ret_std} ')\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(), \n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'valid_entropy_loss': entropy_loss, \n 'valid_IB_loss': info_bottleneck_loss.item(),\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep: \n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "MINE_BC", "path": "imitation/mine.py", "snippet": "class MINE_BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n embedding_dim=1, mine_lr=1e-4, reg_coef=1e-5, info_bottleneck_loss_coef=0.001, \n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(MINE_BC, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize\n \n # Additional Network for MINE Neural Estimator\n self.mine = MINE_DV(action_dim, action_dim + embedding_dim, device=device)\n \n # MINE-BC Hyperparameters\n self.reg_coef = reg_coef\n self.info_bottleneck_loss_coef = info_bottleneck_loss_coef\n\n self.embedding_optimizer = optim.Adam(policy.embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.mine_optimizer = optim.Adam(self.mine.parameters(), lr=mine_lr)\n \n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n # For standardization \n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n def 
train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000, inner_steps=1):\n \n min_loss = 100000.\n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n # MINE : I (a_{t-1}; a_{t}, varphi_t)\n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1)\n \n self.policy_optimizer.zero_grad()\n self.embedding_optimizer.zero_grad()\n self.mine_optimizer.zero_grad()\n\n if self.reg_coef > 0:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n mi_estimate = self.mine.get_mi_bound(prev_actions, expert_action_and_h, update_ema=False)\n\n train_loss = neg_likelihood \\\n + self.reg_coef * mi_estimate \\\n + self.info_bottleneck_loss_coef * info_bottleneck_loss\n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n\n # MINE training\n for _ in range(inner_steps):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n\n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n \n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1)\n \n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n \n mine_loss = -self.mine.get_mi_bound(prev_actions, expert_action_and_h.detach(), update_ema=True)\n\n self.mine_optimizer.zero_grad()\n mine_loss.backward()\n self.mine_optimizer.step()\n\n else:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n \n train_loss = neg_likelihood + self.info_bottleneck_loss_coef * info_bottleneck_loss \n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n \n\n if (num+1) % eval_freq == 0:\n h_valid = self.policy.forward_embedding(obs_valid)\n valid_info_bottleneck_loss = 0.5 * (h_valid ** 2).sum()\n \n if self.reg_coef > 0:\n expert_action_and_h_valid = torch.cat([actions_valid, h_valid], dim=-1) \n valid_mi_estimate = self.mine.get_mi_bound(prev_actions_valid, expert_action_and_h_valid, update_ema=False)\n else:\n valid_mi_estimate = 0.\n \n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n\n valid_loss = valid_neg_likelihood \\\n + 
self.reg_coef * valid_mi_estimate \\\n + self.info_bottleneck_loss_coef * valid_info_bottleneck_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n h_train = self.policy.forward_embedding(obs)\n \n hscic_estimate = estimate_hscic(X=h_train, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=h_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: mine_loss={-mi_estimate.cpu().item()}, train_loss={train_loss.item()}, eval_ret={eval_ret_mean}+-{eval_ret_std} ')\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.cpu().item(),\n 'valid_total_loss': valid_loss.cpu().item(),\n 'train_neg_likelihood': neg_likelihood.cpu().item(),\n 'valid_neg_likelihood': valid_neg_likelihood.cpu().item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'valid_mine_loss': -mi_estimate.cpu().item(),\n 'valid_IB_loss': info_bottleneck_loss.cpu().item(),\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! ')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep: \n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "PALR", "path": "imitation/palr.py", "snippet": "class PALR(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n reg_coef=0.01, ridge_lambda=1e-3):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(PALR, 
self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.stacksize = stacksize\n \n self.policy_optimizer = optim.Adam(self.policy.parameters(), lr=lr) \n \n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n \n # HSCIC Hyperparameters\n self.reg_coef = reg_coef\n self.ridge_lambda = ridge_lambda\n \n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n min_loss = 100000.\n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations'] \n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_expert_action_valid = batch_valid['actions'][:, :-self.action_dim]\n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_expert_action_valid = torch.tensor(prev_expert_action_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n\n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_expert_action = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_expert_action = torch.tensor(prev_expert_action, dtype=torch.float32, device=self.device)\n\n neg_likelihood = - self.policy.log_prob(obs, actions).mean() \n policy_action = self.policy(obs).rsample()\n \n if self.reg_coef != 0: \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action\n Z_std = actions\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda)\n \n else:\n hscic_estimate = 0.\n \n train_loss = neg_likelihood + self.reg_coef * hscic_estimate \n\n self.policy_optimizer.zero_grad()\n train_loss.backward()\n self.policy_optimizer.step()\n\n if (num+1) % eval_freq == 0:\n policy_action = self.policy(obs).sample()\n policy_action_valid = self.policy(obs_valid).sample()\n \n # Train data HSCIC (for debugging) \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, 
:-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n p_std = (policy_action - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n p_std = p_std.to(torch.float32)\n \n else:\n Y_std = prev_expert_action\n Z_std = actions\n p_std = policy_action\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda)\n \n policy_embedding_valid = self.policy.forward_embedding(obs_valid)\n if self.standardize:\n Y_std = (prev_expert_action_valid - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions_valid - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action_valid\n Z_std = actions_valid\n p_std = policy_action\n \n valid_hscic_estimate = estimate_hscic(X=policy_embedding_valid, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda) \n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_expert_action_valid, Z=actions_valid, ridge_lambda=self.ridge_lambda)\n\n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n valid_loss = valid_neg_likelihood + self.reg_coef * valid_hscic_estimate\n\n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: train_policy_loss={train_loss.item():.2f}, val_policy_loss={valid_loss.item():.2f}, eval_ret={eval_ret_mean:.2f}+-{eval_ret_std:.2f}',)\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(),\n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "TanhGaussianPolicyWithEmbedding", "path": "core/policy.py", "snippet": "class TanhGaussianPolicyWithEmbedding(TorchStochasticPolicy):\n \"\"\"\n Reference : \n https://github.com/AlvinWen428/fighting-copycat-agents/blob/52dabfd8b1c42e50f31d84bd431915aad62e09cb/imitation_learning/models/gan_model/__init__.py#L9\n \n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n action_dim,\n embedding_dim,\n embedding_hidden_size,\n policy_hidden_size, \n policy_std=None,\n disc_std=None,\n init_w=1e-3,\n device='cpu',\n hidden_activation=F.leaky_relu, \n layer_norm=False,\n **kwargs\n ):\n if device =='cuda':\n ptu.set_gpu_mode(True)\n self.device = device\n \n super(TanhGaussianPolicyWithEmbedding, self).__init__()\n # hidden_sizes,\n # input_size=obs_dim,\n # output_size=action_dim,\n # init_w=init_w,\n # device=device,\n # **kwargs\n # )\n\n self.input_size = obs_dim\n self.output_size = action_dim\n self.hidden_activation = hidden_activation\n self.layer_norm = layer_norm\n\n self.embedding_params = []\n self.disc_params = []\n self.policy_params = []\n\n self.embed_fcs = []\n # self.embed_layer_norms = []\n\n self.policy_fcs = []\n # self.policy_layer_norms = []\n\n self.disc_fcs = []\n # self.disc_layer_norms = []\n \n self.device = device\n in_size = self.input_size\n\n self.embed_fcs = nn.Sequential(\n nn.Linear(self.input_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device), \n )\n self.embedding_params = self.embed_fcs.parameters()\n\n self.policy_fcs = nn.Sequential(\n nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim, policy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n # self.policy_params.append({'params': self.policy_fcs.parameters()})\n self.policy_mean = nn.Linear(policy_hidden_size, action_dim, 
device=self.device)\n self.policy_params.append({'params': self.policy_mean.parameters()}) \n \n # self.policy_fc1 = nn.Linear(embedding_dim, policy_hidden_size, device=self.device)\n # self.policy_fc1.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc1.bias.data.fill_(0)\n # self.policy_params.append({'params': self.policy_fc1.parameters()}) \n # self.policy_fc2 = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc2.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc2.bias.data.fill_(0)\n # self.policy_params.append({'params': self.policy_fc2.parameters()}) \n\n self.policy_log_std = None\n self.policy_std = policy_std\n \n if policy_std is None:\n self.policy_fc_log_std = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc_log_std.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc_log_std.bias.data.uniform_(-init_w, init_w)\n self.policy_params.append({'params': self.policy_fc_log_std.parameters()})\n else:\n self.policy_log_std = np.log(policy_std)\n assert LOG_SIG_MIN <= self.policy_log_std <= LOG_SIG_MAX\n\n def forward(self, obs):\n # h = obs\n\n # h = self.hidden_activation(self.embed_fc1(h))\n # h = self.embed_fc2(h)\n\n # h = self.hidden_activation(self.policy_fc1(h))\n # policy_mean = self.policy_fc2(h)\n\n h = self.embed_fcs(obs)\n h = self.policy_fcs(h)\n policy_mean = self.policy_mean(h)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(h)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def forward_embedding(self, obs):\n # h = obs\n \n # h = self.hidden_activation(self.embed_fc1(h))\n # h = self.embed_fc2(h)\n h = self.embed_fcs(obs)\n\n return h\n\n def forward_policy_from_embedding(self, h):\n # h = self.hidden_activation(h)\n # h = self.hidden_activation(self.policy_fc1(h))\n h = self.policy_fcs(h)\n policy_mean = self.policy_mean(h)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(h)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def logprob(self, action, mean, std):\n tanh_normal = TanhNormal(mean, std)\n log_prob = tanh_normal.log_prob(\n action,\n )\n log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def log_prob(self, obs, action):\n tanh_normal = self.forward(obs)\n log_prob = tanh_normal.log_prob(\n action,\n )\n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def log_prob_policy_from_embedding(self, h, action):\n tanh_normal = self.forward_policy_from_embedding(h)\n log_prob = tanh_normal.log_prob(\n action,\n )\n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def predict_action_from_embedding(self, h):\n tanh_normal = self.forward_policy_from_embedding(h)\n pred_action = tanh_normal.mean \n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return pred_action" }, { "identifier": "TanhGaussianRAPPolicy", "path": "core/policy.py", "snippet": "class TanhGaussianRAPPolicy(TorchStochasticPolicy):\n \"\"\"\n Reference : \n \n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n stack_size,\n action_dim,\n embedding_dim,\n 
embedding_hidden_size,\n policy_hidden_size,\n residual_hidden_size,\n policy_std=None,\n residual_std=0.1,\n device='cpu',\n hidden_activation=F.leaky_relu, \n layer_norm=False,\n **kwargs\n ):\n if device =='cuda':\n ptu.set_gpu_mode(True)\n self.device = device\n \n super(TanhGaussianRAPPolicy, self).__init__()\n \n self.input_size = obs_dim\n self.stack_size = stack_size\n self.output_size = action_dim\n self.hidden_activation = hidden_activation\n self.layer_norm = layer_norm\n\n self.embedding_params = []\n self.residual_params = []\n self.policy_params = []\n\n self.history_embed_fcs = []\n self.single_embed_fcs = []\n # self.embed_layer_norms = []\n\n self.policy_fcs = []\n self.residual_fcs = []\n \n self.device = device\n in_size = self.input_size\n\n self.history_embed_fcs = nn.Sequential(\n nn.Linear(self.input_size * self.stack_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device)\n )\n self.history_embedding_params = self.history_embed_fcs.parameters()\n \n self.single_embed_fcs = nn.Sequential(\n nn.Linear(self.input_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device)\n )\n self.single_embedding_params = self.single_embed_fcs.parameters()\n\n self.policy_fcs = nn.Sequential(\n nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim*2, policy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.policy_params.append({'params': self.policy_fcs.parameters()})\n self.policy_mean = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n self.policy_params.append({'params': self.policy_mean.parameters()}) \n\n self.policy_log_std = None\n self.policy_std = policy_std\n \n if policy_std is None:\n self.policy_fc_log_std = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc_log_std.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc_log_std.bias.data.uniform_(-init_w, init_w)\n self.policy_params.append({'params': self.policy_fc_log_std.parameters()})\n else:\n self.policy_log_std = np.log(policy_std)\n assert LOG_SIG_MIN <= self.policy_log_std <= LOG_SIG_MAX\n\n self.residual_fcs = nn.Sequential(\n # nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim, residual_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.residual_params.append({'params': self.residual_fcs.parameters()})\n self.residual_mean = nn.Linear(residual_hidden_size, action_dim, device=self.device) \n self.residual_params.append({'params': self.residual_mean.parameters()})\n\n def forward(self, obs):\n if len(obs.shape) < 2:\n obs = obs[None]\n \n obs_total = obs\n obs_current = obs[:, -self.input_size:]\n\n m = self.history_embed_fcs(obs_total)\n h = self.single_embed_fcs(obs_current) \n \n policy_input = torch.cat([m.detach(), h], dim=-1)\n \n policy_input = self.policy_fcs(policy_input)\n policy_mean = self.policy_mean(policy_input)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(policy_input)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n policy_dist = TanhNormal(policy_mean, policy_std) \n \n return policy_dist 
#, residual_dist\n\n def forward_embedding(self, obs):\n obs_total = obs\n obs_current = obs[:, -self.input_size:]\n\n m = self.history_embed_fcs(obs_total)\n h = self.single_embed_fcs(obs_current)\n\n return m, h\n\n def forward_residual_from_m(self, m):\n residual_m = self.residual_fcs(m)\n residual_mean = self.residual_mean(residual_m) \n \n return residual_mean\n\n def forward_policy_from_embedding(self, m, h):\n policy_input = torch.cat([m.detach(), h], dim=-1)\n \n policy_input = self.policy_fcs(policy_input)\n policy_mean = self.policy_mean(policy_input)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(policy_input)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def logprob(self, action, mean, std):\n tanh_normal = TanhNormal(mean, std)\n log_prob = tanh_normal.log_prob(action)\n log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n \n def log_prob(self, obs, action):\n tanh_normal = self.forward(obs)\n log_prob = tanh_normal.log_prob(action) \n return log_prob\n \n def log_prob_policy_from_m_h(self, m, h, action): \n tanh_normal = self.forward_policy_from_embedding(m, h)\n log_prob = tanh_normal.log_prob(action)\n return log_prob\n\n def predict_action_from_m_h(self, m, h):\n tanh_normal = self.forward_policy_from_embedding(m, h)\n pred_action = tanh_normal.mean \n return pred_action" }, { "identifier": "EnvReplayBuffer", "path": "core/replay_buffer.py", "snippet": "class EnvReplayBuffer(SimpleReplayBuffer):\n def __init__(\n self,\n max_replay_buffer_size,\n env,\n stack_size=1,\n action_history_len=0,\n env_info_sizes=None,\n train_with_action_history=False\n ):\n \"\"\"\n :param max_replay_buffer_size:\n :param env:\n \"\"\"\n self.env = env\n self._ob_space = env.observation_space #.shape[0] * stack_size\n self._action_space = env.action_space\n\n if train_with_action_history:\n obs_dim = get_dim(self._ob_space) * stack_size + get_dim(self._action_space) * max(stack_size - 1, 1)\n else:\n obs_dim = get_dim(self._ob_space) * stack_size\n\n act_dim = get_dim(self._action_space) * (action_history_len)\n\n if env_info_sizes is None:\n if hasattr(env, 'info_sizes'):\n env_info_sizes = env.info_sizes\n else:\n env_info_sizes = dict()\n\n super().__init__(\n max_replay_buffer_size=max_replay_buffer_size,\n observation_dim=obs_dim,\n action_dim=act_dim,\n env_info_sizes=env_info_sizes\n )\n\n self.obs_mean = None\n self.obs_std = None\n\n self.act_mean = None\n self.act_std = None\n\n # def add_sample(self, observation, action, prev_action, reward, terminal,\n # next_observation, **kwargs):\n # if isinstance(self._action_space, Discrete):\n # new_action = np.zeros(self._action_dim)\n # new_action[action] = 1\n # else:\n # new_action = action\n\n # return super().add_sample(\n # observation=observation,\n # action=new_action,\n # prev_action=prev_action,\n # reward=reward,\n # next_observation=next_observation,\n # terminal=terminal,\n # # **kwargs\n # )\n\n def calculate_statistics(self):\n self.obs_mean = np.mean(self._observations[:self._top], axis=0, keepdims=True)\n self.obs_std = np.std(self._observations[:self._top], axis=0, keepdims=True)\n\n self.act_mean = np.mean(self._actions[:self._top], axis=0, keepdims=True)\n self.act_std = np.std(self._actions[:self._top], axis=0, keepdims=True)\n\n return self.obs_mean, self.obs_std, 
self.act_mean, self.act_std\n\n def set_statistics(self, obs_mean, obs_std, act_mean, act_std):\n self.obs_mean, self.obs_std, self.act_mean, self.act_std = obs_mean, obs_std, act_mean, act_std\n \n def get_statistics(self):\n return self.obs_mean, self.obs_std, self.act_mean, self.act_std\n\n def random_batch(self, batch_size, standardize=False):\n indices = np.random.choice(self._size, size=batch_size, replace=self._replace or self._size < batch_size)\n if not self._replace and self._size < batch_size:\n warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')\n\n if standardize and self.obs_mean is not None:\n obss = (self._observations[indices] - self.obs_mean) / self.obs_std\n # actions = (self._actions[indices] - self.act_mean) / self.act_std\n next_obss = (self._next_obs[indices] - self.obs_mean) / self.obs_std\n else:\n obss = self._observations[indices] \n # actions = self._actions[indices] \n next_obss = self._next_obs[indices]\n\n actions = self._actions[indices]\n \n batch = dict(\n observations=obss,\n actions=actions,\n # prev_actions=self._prev_actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=next_obss,\n )\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n\n return batch\n \n def get_batch(self, batch_size, standardize=False):\n datasize = min(batch_size, self._top) \n indices = np.arange(datasize)\n # if not self._replace and self._size < batch_size:\n # warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')\n\n if standardize and self.obs_mean is not None:\n obss = (self._observations[indices] - self.obs_mean) / self.obs_std\n # actions = (self._actions[indices] - self.act_mean) / self.act_std\n next_obss = (self._next_obs[indices] - self.obs_mean) / self.obs_std\n else:\n obss = self._observations[indices] \n # actions = self._actions[indices] \n next_obss = self._next_obs[indices]\n\n actions = self._actions[indices]\n \n batch = dict(\n observations=obss,\n actions=actions,\n # prev_actions=self._prev_actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=next_obss,\n )\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n\n return batch\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n if isinstance(self._action_space, Discrete):\n new_action = np.zeros(self._action_dim)\n new_action[action] = 1\n else:\n new_action = action\n\n return super().add_sample(\n observation=observation,\n action=new_action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n # **kwargs\n )" }, { "identifier": "preprocess_dataset_with_prev_actions", "path": "core/preprocess.py", "snippet": "def preprocess_dataset_with_prev_actions(mdpfile, envtype, stacksize=1, partially_observable=False, action_history_len=2):\n \n indx = list(np.arange(20))\n # Indices of position information observations\n if partially_observable:\n envtype_to_idx = {\n 'hopper': indx[:5], \n 'ant': indx[:13], \n 'walker2d': indx[:8], \n 'halfcheetah': indx[:4] + indx[8:13]\n }\n obs_idx = envtype_to_idx[envtype]\n observations = np.array(mdpfile['observations'])[:, obs_idx]\n next_observations = np.array(mdpfile['next_observations'])[:, obs_idx]\n else:\n 
observations = np.array(mdpfile['observations'])\n next_observations = np.array(mdpfile['next_observations'])\n \n terminals = np.array(mdpfile['terminals'])\n timeouts = np.array(mdpfile['timeouts'])\n rewards = np.array(mdpfile['rewards'])\n actions = np.array(mdpfile['actions'])\n\n obs_dim = observations.shape[-1]\n action_dim = actions.shape[-1]\n\n n_data = observations.shape[0]\n new_observations_list = []\n new_next_observations_list = []\n prev_action_list = []\n action_history_list = []\n \n idx_from_initial_state = 0\n num_trajs = 0\n\n for i in range(n_data):\n if idx_from_initial_state == 0:\n prev_action = np.zeros(action_dim)\n else:\n prev_action = actions[i-1]\n prev_action_list.append(prev_action)\n\n if idx_from_initial_state < stacksize:\n if idx_from_initial_state == 0:\n initial_obs = observations[i]\n \n new_observation = np.zeros(obs_dim * stacksize)\n new_observation_ = np.concatenate(observations[i-idx_from_initial_state: i+1])\n new_observation[-(idx_from_initial_state+1) * obs_dim:] = new_observation_\n \n new_next_observation = np.zeros(obs_dim * stacksize)\n new_next_observation_ = np.concatenate(next_observations[i-idx_from_initial_state: i+1])\n new_next_observation[-(idx_from_initial_state+1) * obs_dim:] = new_next_observation_\n \n if idx_from_initial_state + 1 != stacksize:\n new_next_observation[-(idx_from_initial_state+2) * obs_dim:-(idx_from_initial_state+1) * obs_dim] \\\n = initial_obs\n \n else:\n new_observation = np.concatenate(observations[i+1-stacksize:i+1])\n new_next_observation = np.concatenate(next_observations[i+1-stacksize:i+1])\n\n if idx_from_initial_state < action_history_len:\n action_history = np.zeros(action_dim * action_history_len)\n action_history_ = np.concatenate(actions[i-idx_from_initial_state: i+1])\n action_history[-(idx_from_initial_state+1) * action_dim:] = action_history_\n \n else:\n action_history = np.concatenate(actions[i+1-action_history_len:i+1])\n\n\n new_observations_list.append(new_observation)\n new_next_observations_list.append(new_next_observation)\n action_history_list.append(action_history)\n\n idx_from_initial_state += 1\n if terminals[i] or timeouts[i]:\n idx_from_initial_state = 0\n num_trajs += 1 \n\n new_observations = np.array(new_observations_list)\n new_next_observations = np.array(new_next_observations_list)\n new_actions = np.array(action_history_list)\n\n new_paths = {\n 'observations': new_observations,\n 'next_observations': new_next_observations,\n 'rewards': rewards,\n 'actions': new_actions,\n 'terminals': terminals,\n 'timeouts': timeouts \n }\n \n return new_paths" }, { "identifier": "data_select_num_transitions", "path": "core/preprocess.py", "snippet": "def data_select_num_transitions(path, num_transitions=1000, start_idx=0, random=False):\n new_path = {}\n \n if random:\n num_full_trajs = len(path['observations'])\n choice_idx = np.random.choice(num_full_trajs, num_transitions)\n \n else:\n choice_idx = np.arange(start_idx, start_idx + num_transitions)\n \n for key in path.keys():\n new_path[key] = np.array(path[key])[choice_idx]\n \n return new_path" }, { "identifier": "NormalizedBoxEnv", "path": "rlkit/envs/wrappers.py", "snippet": "class NormalizedBoxEnv(ProxyEnv):\n \"\"\"\n Normalize action to in [-1, 1].\n\n Optionally normalize observations and scale reward.\n \"\"\"\n\n def __init__(\n self,\n env,\n reward_scale=1.,\n obs_mean=None,\n obs_std=None,\n ):\n ProxyEnv.__init__(self, env)\n self._should_normalize = not (obs_mean is None and obs_std is None)\n if 
self._should_normalize:\n if obs_mean is None:\n obs_mean = np.zeros_like(env.observation_space.low)\n else:\n obs_mean = np.array(obs_mean)\n if obs_std is None:\n obs_std = np.ones_like(env.observation_space.low)\n else:\n obs_std = np.array(obs_std)\n self._reward_scale = reward_scale\n self._obs_mean = obs_mean\n self._obs_std = obs_std\n ub = np.ones(self._wrapped_env.action_space.shape)\n self.action_space = Box(-1 * ub, ub)\n\n def estimate_obs_stats(self, obs_batch, override_values=False):\n if self._obs_mean is not None and not override_values:\n raise Exception(\"Observation mean and std already set. To \"\n \"override, set override_values to True.\")\n self._obs_mean = np.mean(obs_batch, axis=0)\n self._obs_std = np.std(obs_batch, axis=0)\n\n def _apply_normalize_obs(self, obs):\n return (obs - self._obs_mean) / (self._obs_std + 1e-8)\n\n def step(self, action):\n lb = self._wrapped_env.action_space.low\n ub = self._wrapped_env.action_space.high\n scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)\n scaled_action = np.clip(scaled_action, lb, ub)\n\n wrapped_step = self._wrapped_env.step(scaled_action)\n next_obs, reward, done, info = wrapped_step\n if self._should_normalize:\n next_obs = self._apply_normalize_obs(next_obs)\n return next_obs, reward * self._reward_scale, done, info\n\n def __str__(self):\n return \"Normalized: %s\" % self._wrapped_env" } ]
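The trainer snippet above repeatedly calls `estimate_hscic(X, Y, Z, ridge_lambda=...)`, but the estimator itself is not among the context snippets. Below is a minimal sketch of a kernel-based HSCIC (Hilbert-Schmidt Conditional Independence Criterion) estimate with the same name and signature as the calls in the snippet. It assumes RBF kernels with a fixed bandwidth and the standard ridge-regularized conditional-mean-embedding formulation; the kernel choice, bandwidth, and normalization are assumptions, not the repository's actual implementation.

```python
import torch

def rbf_kernel(a, b, sigma=1.0):
    # RBF Gram matrix from pairwise squared Euclidean distances
    sq_dists = torch.cdist(a, b) ** 2
    return torch.exp(-sq_dists / (2.0 * sigma ** 2))

def estimate_hscic(X, Y, Z, ridge_lambda=1e-5, sigma=1.0):
    # Kernel-based estimate of the dependence between X and Y conditioned on Z.
    n = X.shape[0]
    Kx = rbf_kernel(X, X, sigma)
    Ky = rbf_kernel(Y, Y, sigma)
    Kz = rbf_kernel(Z, Z, sigma)
    # Ridge-regularized kernel regression on Z; column i holds the weights for z_i.
    W = torch.linalg.solve(Kz + n * ridge_lambda * torch.eye(n, device=Z.device), Kz)
    # w_i^T (Kx o Ky) w_i  -  2 w_i^T ((Kx w_i) o (Ky w_i))  +  (w_i^T Kx w_i)(w_i^T Ky w_i)
    term1 = torch.einsum('ni,nm,mi->i', W, Kx * Ky, W)
    term2 = torch.einsum('ni,ni->i', W, (Kx @ W) * (Ky @ W))
    term3 = torch.einsum('ni,nm,mi->i', W, Kx, W) * torch.einsum('ni,nm,mi->i', W, Ky, W)
    # Averaged over the batch, matching usage such as
    # estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)
    return (term1 - 2.0 * term2 + term3).mean()
```

The trainer adds this scalar to the negative log-likelihood as `train_loss = neg_likelihood + self.reg_coef * hscic_estimate`, so driving it toward zero pushes the policy embedding toward conditional independence from the previous expert action given the target action.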
import os
import wandb
import envs
import d4rl
import gym
import torch
from imitation.bc import BC
from imitation.rap import RAP
from imitation.fca import FCA
from imitation.mine import MINE_BC
from imitation.palr import PALR
from argparse import ArgumentParser
from itertools import product
from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy
from core.replay_buffer import EnvReplayBuffer
from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions
from rlkit.envs.wrappers import NormalizedBoxEnv
19053
wandb_dir = '.'
os.environ['WANDB_DIR'] = wandb_dir
os.environ['D4RL_DATASET_DIR'] = './dataset/'

def train(configs):
    env = NormalizedBoxEnv(gym.make(configs['envname']))
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size

    d4rl_env = gym.make(configs['d4rl_env_name'])

    stacksize = configs['stacksize']
    if stacksize == 0:
        stacksize = 1

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    envname, envtype = configs['envname'], configs['envtype']

    traj_load_path = configs['traj_load_path']
    print(f'-- Loading dataset from {traj_load_path}...')
    dataset = d4rl_env.get_dataset()
    print(f'-- Done!')

    print(f'-- Preprocessing dataset... ({envtype}, {stacksize})')
wandb_dir = '.'
os.environ['WANDB_DIR'] = wandb_dir
os.environ['D4RL_DATASET_DIR'] = './dataset/'

def train(configs):
    env = NormalizedBoxEnv(gym.make(configs['envname']))
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size

    d4rl_env = gym.make(configs['d4rl_env_name'])

    stacksize = configs['stacksize']
    if stacksize == 0:
        stacksize = 1

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    envname, envtype = configs['envname'], configs['envtype']

    traj_load_path = configs['traj_load_path']
    print(f'-- Loading dataset from {traj_load_path}...')
    dataset = d4rl_env.get_dataset()
    print(f'-- Done!')

    print(f'-- Preprocessing dataset... ({envtype}, {stacksize})')
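For reference, `train` above reads all of its settings from a plain `configs` dict. A hypothetical invocation is sketched below; the keys mirror the lookups visible in the snippet (`envname`, `d4rl_env_name`, `envtype`, `stacksize`, `partially_observable`, `traj_load_path`), while the concrete values are illustrative assumptions rather than settings from the repository.

```python
# Hypothetical configs; keys match the lookups in train() above, values are illustrative.
configs = {
    'envname': 'Hopper-v2',               # gym env wrapped by NormalizedBoxEnv
    'd4rl_env_name': 'hopper-expert-v2',  # D4RL dataset to load via get_dataset()
    'envtype': 'hopper',                  # selects partially-observable obs indices
    'stacksize': 4,                       # observation history length (0 is coerced to 1)
    'partially_observable': True,         # keep only the position slice of observations
    'traj_load_path': './dataset/hopper-expert-v2.hdf5',
}

train(configs)
```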
path = preprocess_dataset_with_prev_actions(dataset, envtype, stacksize, configs['partially_observable'], action_history_len=2)
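This preprocessing call and the `evaluate` loop earlier share the same stacking convention: the newest observation occupies the last `obs_dim` slots of a length `obs_dim * stacksize` vector, zero-padded on the left at the start of an episode. A small self-contained illustration with made-up dimensions:

```python
import numpy as np

obs_dim, stacksize = 3, 4  # illustrative sizes only
frames = [np.full(obs_dim, t, dtype=float) for t in (1, 2)]  # two frames observed so far

stacked = np.zeros(obs_dim * stacksize)
stacked[-len(frames) * obs_dim:] = np.concatenate(frames)
print(stacked)  # [0. 0. 0. 0. 0. 0. 1. 1. 1. 2. 2. 2.]  (oldest left, newest right)
```

Once `stacksize` frames have been seen, the padding disappears and the vector is simply `np.concatenate(obs_list[-stacksize:])`, exactly as in the evaluation loop above.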
8
2023-11-06 08:35:34+00:00
24k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): # change to 0.01\n for char in text:\n print(char, end='', flush=True)\n time.sleep(delay)\n print()" }, { "identifier": "shop_help", "path": "components/common_functions.py", "snippet": "def shop_help():\n print_slow(Fore.YELLOW + \"Shop Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[buy] - Use the 'buy [upgrade]' command to purchase the upgrade in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "help_user", "path": "components/common_functions.py", "snippet": "def help_user():\n print_slow(Fore.MAGENTA + \"Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[connect] - Use the 'connect' command to hack into Enigma Corps network.\")\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to view and respond to emails from your client and other characters.\")\n print_slow(\"\")\n print_slow(\"[balance] - Use the 'balance' command to view your current earnings which you can spend on upgrades. \")\n print_slow(\"\")\n print_slow(\"[shop] - Use the 'shop' command to view upgrades available in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[help] - Use the 'help' command if you need assistance at any time.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the Main Menu.\")\n print_slow(\"\")" }, { "identifier": "connect_help", "path": "components/common_functions.py", "snippet": "def connect_help():\n print_slow(Fore.MAGENTA + \"Connect Help:\" + Style.RESET_ALL)\n print_slow(\n \"[scan] - Use the 'scan' command to scan the network and search for available systems and vulnerabilities.\")\n print_slow(\"\")\n print_slow(\"[hack] - Use the 'hack [system/vulnerability]' to hack into different systems.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[disconnect] - Use the 'disconnect' command to disconnect from the current system or vulnerability.\")\n print_slow(\"\")" }, { "identifier": "mail_help", "path": "components/common_functions.py", "snippet": "def mail_help():\n print_slow(Fore.LIGHTBLUE_EX + \"Mail Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list all emails.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r [subject]' command to read an email with the specified subject.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "system_help", "path": "components/common_functions.py", "snippet": "def system_help():\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to log into the users emails.\")\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list files in a users system.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r 
[file]' command to read files in a users system\")\n print_slow(\"\")" }, { "identifier": "intro_call", "path": "conversations/calls.py", "snippet": "def intro_call():\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Welcome, Cipher. Operation Enigma is our covert mission against Enigma Corp, a powerful and secretive entity.\")\n print_slow(\n \"Your skills and secrecy have brought you to our attention. Your mission is to dig through their systems and servers looking for valuable data.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Got it, Anonymous. Exposing secrets and bringing justice. I'm in.\")\n print_slow(\"What's my first move? Talk to me about this 'EnigmaLink'.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Excellent, Cipher. EnigmaLink is a specialized tool available on the Hacker's Market. It contains a hidden backdoor, allowing access to Enigma Corps servers.\")\n print_slow(\n \"Your task is to acquire EnigmaLink and initiate your infiltration. Use the 'connect' command to navigate the network and gather crucial intelligence.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"EnigmaLink, got it. I'll secure it and initiate the infiltration. What about this employee, Amy?\")\n print_slow(\"You mentioned her password is 'sexinthecity.' What's my objective with her?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Good question, Cipher. Amy is a key target. Use her password to access her computer and gather any pertinent information.\")\n print_slow(\n \"This data is vital to our cause. Be thorough and meticulous in your investigation. The success of our operation depends on it.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Understood, Anonymous. I'll focus on Amy, gather intel, and proceed with precision.\")\n print_slow(\"Consider it done. Anything else I should know before I dive in?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"One last thing, Cipher. All collected data is highly confidential. This contract is binding, and your success is paramount.\")\n print_slow(\"Execute with diligence, and may the odds be in your favor. 
Good luck, Cipher.\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "first_call", "path": "conversations/calls.py", "snippet": "def first_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"That's a good start, but we already have that information.\")\n print_slow(\"Regardless, I've transferred £20 into the account for your troubles.\")\n print_slow(\"Keep digging Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "second_call", "path": "conversations/calls.py", "snippet": "def second_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Hey Cipher, you nailed it! 'Billy' just spilled the beans about wanting to climb the corporate ladder into management.\")\n print_slow(\n \"This is gold for us. We can guide 'Billy' toward training and workshops that align with our interests, nudging things in our favor.\")\n print_slow(\n \"Picture it – we're pulling the strings, helping 'Billy' grow, and steering the ship where we want it to go.\")\n print_slow(\"Keep the ball rolling, Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "third_call", "path": "conversations/calls.py", "snippet": "def third_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\"\n \"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've stumbled upon a perplexing development regarding Enigma's interest in a mysterious 'compound.'\")\n print_slow(\n \"I'm cross-referencing our existing intel to unveil more details. Stay vigilant and be prepared for the unknown.\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A compound, huh? Any hints on whether we're talking metal, chemicals, or something else entirely?\")\n print_slow(\"This feels like navigating in the dark. What exactly am I dealing with?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Response\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"Cipher, we're in the dark too. Initial reports are unclear—could be metal, chemical, or something beyond our comprehension.\")\n print_slow(\n \"Your mission is to identify the nature of this compound. Exercise extreme caution; this goes deeper than we anticipated.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Inquiry\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"So, we're playing 'guess the compound.' 
Any leads, any connections I should explore?\")\n print_slow(\"This is starting to sound like one of those high-stakes puzzles.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Clarification\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"I wish I had more details, Cipher. This is uncharted territory for us. Investigate discreetly, and trust no one.\")\n print_slow(\n \"I'll attempt to gather more intel. Stay on the line, and keep me updated on any findings.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fourth_call", "path": "conversations/calls.py", "snippet": "def fourth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've got our hands on an intriguing document – an Employee Performance Review for 'Billy Constantine'.\")\n print_slow(\n \"This could be a goldmine of information. Let's dig in and see if there's anything we can leverage to our advantage.\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"An Employee Performance Review? Interesting choice. What's the scoop on 'Billy Constantine'?\")\n print_slow(\"Give me the details, and we'll figure out our next move.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, 'Billy Constantine' is making waves. The review highlights exceptional performance as a sales representative.\")\n print_slow(\n \"He's exceeding sales targets, mentoring new team members, and earning a solid 4.5/5 rating. A rising star, it seems.\")\n print_slow(\"We might use this to our advantage. Let's explore how we can align his ambitions with our agenda.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Strategy\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A high-performing sales rep, huh? We could steer 'Billy' towards projects that align with our goals.\")\n print_slow(\"Let's use this performance review to our advantage. Maybe mentorship programs, leadership initiatives?\")\n print_slow(\"I'm ready to play this card strategically. What's the next move?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Great thinking, Cipher. Let's work on a plan to subtly guide 'Billy' toward initiatives that benefit us.\")\n print_slow(\"We'll need to dig deeper into 'Billy's' aspirations and weave our influence seamlessly.\")\n print_slow(\"Stay vigilant, Cipher. 
This could be a game-changer.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fifth_call", "path": "conversations/calls.py", "snippet": "def fifth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've intercepted some Meeting Minutes dated 24/06/2025. It's related to 'Project X' and involves key players.\")\n print_slow(\n \"This could be our chance to uncover more about Enigma's activities. Let's dive into the details and see what we can extract.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"Meeting Minutes, huh? 'Project X' sounds intriguing. Who were the players involved, and what's the agenda?\")\n print_slow(\"I'm ready to dissect this information and uncover any hidden gems.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, the meeting involved key personnel—Amy, Billy, Kyle, and others. 'Project X' is on the agenda, and there's mention of sensitive materials.\")\n print_slow(\n \"This could be a crucial insight into Enigma's plans. Let's analyze the action items and plan our next move.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Analysis\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"'Project X,' sensitive materials, and action items. This is a goldmine of information.\")\n print_slow(\n \"Let's focus on dissecting the action items and see if we can connect the dots. What's our strategy, Anonymous?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Agreed, Cipher. Let's delve into the action items, especially the data compilation and safety protocol training.\")\n print_slow(\"We might uncover more about 'Project X' and gain insights into Enigma's plans.\")\n print_slow(\"Stay sharp, Cipher. 
This could be a pivotal moment in our mission.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "sixth_call", "path": "conversations/calls.py", "snippet": "def sixth_call():\n print_slow(\"ADD CALL STUFF HERE\")" }, { "identifier": "markus_seen_call", "path": "conversations/calls.py", "snippet": "def markus_seen_call():\n print_slow(\"Something goes here\")" }, { "identifier": "code_shatter_call", "path": "conversations/minigame_calls.py", "snippet": "def code_shatter_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"I see you have bought CodeShatter!\")\n print_slow(\"This item is a one time use upgrade so once you get the password, it is gone so use wisely!\")\n print_slow(\"But don't threat, if you fail, you get a chance to retry. The item is only used when you get the password, so be sure to write it down!\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "code_shatter_minigame", "path": "minigames/code_shatter_minigame.py", "snippet": "def code_shatter_minigame():\n # Generate a random 5-digit number\n target = [str(random.randint(1, 9)) for _ in range(5)]\n\n print_slow(\"Welcome to CodeShatter!\")\n print_slow(\"\")\n print_slow(\"Guess the 5-digit number.\")\n print_slow(\"\")\n print_slow(\"The sequence can contain multiple same numbers\")\n print_slow(\"\")\n print_slow(Fore.GREEN + \"Green: Correct digit in correct position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"Orange: Correct digit in incorrect position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Red: Incorrect digit.\" + Style.RESET_ALL)\n print_slow(\"\")\n\n attempts = 0\n while attempts < 7:\n # Get the user's guess\n guess = input(\"Enter your guess: \")\n\n if len(guess) != 5 or not guess.isdigit():\n print_slow(\"Invalid input. Please enter a 5-digit number.\")\n continue\n\n attempts += 1\n\n # Check the guess against the target\n feedback = []\n for i in range(5):\n if guess[i] == target[i]:\n feedback.append(Fore.GREEN + guess[i] + Style.RESET_ALL)\n elif guess[i] in target:\n feedback.append(Fore.YELLOW + guess[i] + Style.RESET_ALL)\n else:\n feedback.append(Fore.RED + guess[i] + Style.RESET_ALL)\n\n print_slow(\"Feedback: \" + \" \".join(feedback))\n\n # Check if the guess is correct\n if guess == \"\".join(target):\n print_slow(Fore.GREEN + \"Access granted.\" + Style.RESET_ALL)\n break\n else:\n print_slow(Fore.RED + \"Access denied. 
Too many attempts.\" + Style.RESET_ALL)\n time.sleep(1)\n print_slow(\"\")\n print_slow(Fore.RED + \"Rebooting CodeShatter with new proxy...\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n code_shatter_minigame()" }, { "identifier": "port_scanning", "path": "minigames/eye_spy_minigame.py", "snippet": "def port_scanning():\n num_ports = 10\n open_ports, closed_ports = generate_ports(num_ports)\n attempts = 5\n correct_guesses = 0\n scan_attempts = 2\n\n print_slow(\"Welcome to the Port Scanning minigame!\")\n print_slow(\"\")\n print_slow(f\"Find the open ports in the range 1-{num_ports}.\")\n print_slow(\"\")\n print_slow(f\"You have {attempts} attempts.\")\n print_slow(\"\")\n\n while scan_attempts > 0:\n print_slow(\"\")\n print_slow(f\"\\nYou have {scan_attempts} scan attempts left.\")\n print_slow(\"\")\n start = int(input(\"Enter the start of the range to scan: \"))\n print_slow(\"\")\n end = int(input(\"Enter the end of the range to scan: \"))\n print_slow(\"\")\n\n num_open_ports_in_range = len(open_ports.intersection(range(start, end + 1)))\n print_slow(\"\")\n print_slow(f\"There are {num_open_ports_in_range} open ports in the range {start}-{end}.\")\n\n scan_attempts -= 1\n\n while attempts > 0 and len(open_ports) > 0:\n port = int(input(\"\\nEnter a port number to guess: \"))\n\n if port in open_ports:\n print_slow(Fore.GREEN + \"Port is open!\" + Style.RESET_ALL)\n open_ports.remove(port)\n correct_guesses += 1\n elif port in closed_ports:\n print_slow(Fore.RED + \"Port is closed.\" + Style.RESET_ALL)\n closed_ports.remove(port)\n else:\n print_slow(\"Invalid port number. Please enter a number between 1 and\", num_ports)\n\n attempts -= 1\n\n if len(open_ports) == 0:\n print_slow(\n Fore.GREEN + \"\\nCongratulations! You have successfully found all the open ports and gained access to the camera.\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n else:\n print_slow(\n Fore.RED + f\"\\nHack Failed! You found {correct_guesses} out of {len(open_ports) + correct_guesses} open ports.\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n port_scanning()" }, { "identifier": "AmySystem", "path": "systems/level_1/amy/amy_system.py", "snippet": "class AmySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"return_to_work_form.txt\",\n \"content\": (\n \"Employee Name: _______________\\n\"\n \"Employee ID: ____________\\n\"\n \"Department: _______________\\n\"\n \"Date of Return: ______\\n\\n\"\n \"I, [Employee Name], certify that I have followed the company's \"\n \"guidelines for returning to work after an absence. \"\n \"I understand that it is my responsibility to adhere to all safety \"\n \"protocols and procedures to ensure the health and well-being of my \"\n \"colleagues and myself.\\n\\n\"\n \"I acknowledge that I have completed any necessary training and have \"\n \"been briefed on any updates to the company's policies and procedures. \"\n \"I am aware that I must report any symptoms or exposure to COVID-19 to \"\n \"my supervisor immediately.\\n\\n\"\n \"I am committed to doing my part to maintain a safe and healthy work \"\n \"environment for everyone. I will continue to follow all guidelines \"\n \"and protocols and will cooperate with any additional measures that \"\n \"may be implemented in the future.\\n\\n\"\n \"Signature: [Employee Signature]\\n\"\n \"Date: [Date]\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. 
This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"benefits_summary.txt\",\n \"content\": (\n \"At Enigma Corps, we believe in taking care of our employees and \"\n \"offer a comprehensive benefits package to support your health, well-being, \"\n \"and financial security. Below is a summary of the benefits available to \"\n \"you as an employee of Enigma Corps.\\n\\n\"\n \"Health Insurance: We offer a choice of medical, dental, and vision \"\n \"plans to meet your needs. Our plans provide coverage for preventive care, \"\n \"hospitalization, prescription drugs, and more.\\n\\n\"\n \"Retirement Savings: We offer a 401(k) plan with a generous company \"\n \"match to help you save for your future. You can choose from a variety of \"\n \"investment options to suit your needs.\\n\\n\"\n \"Paid Time Off: We provide a generous amount of paid time off, \"\n \"including vacation, sick leave, and holiday pay. We also offer paid \"\n \"parental leave for new parents.\\n\\n\"\n \"Flexible Work Arrangements: We understand the importance of work-life \"\n \"balance and offer flexible work arrangements, such as remote work and \"\n \"flexible schedules, where possible.\\n\\n\"\n \"Wellness Programs: We offer a variety of wellness programs and \"\n \"resources to support your physical and mental health, including fitness \"\n \"classes, stress management programs, and counseling services.\\n\\n\"\n \"Professional Development: We are committed to supporting your growth \"\n \"and development and offer a variety of training and development \"\n \"opportunities, including tuition reimbursement, workshops, and seminars.\"\n \"\\n\\n\"\n \"We encourage you to review this summary carefully and take advantage of \"\n \"the benefits available to you. If you have any questions or need further \"\n \"information, please contact the HR department.\"\n )\n },\n ]\n self.emails = [\n {\n \"sender\": \"Amy\",\n \"subject\": \"Can't Stop Thinking About You\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I hope this message finds you in good spirits. I've been meaning to write to you for a while now, but I couldn't find the right words to express what I've been feeling.\\n\\n\"\n \"Ever since that night we spent together, I can't seem to get you out of my mind. There's something about the way you make me feel that I've never experienced before. 
\"\n \"\\nIt's exhilarating, yet terrifying all at the same time.\\n\\n\"\n \"I know we both have a lot on our plates right now, and I don't want to add any more stress to your life. But I can't help but wonder what could happen if we gave this a real shot. \"\n \"I know it's complicated, and there are a lot of factors to consider, but I think we owe it to ourselves to explore this connection we have.\\n\\n\"\n \"I understand if you're not ready to take that step, and I don't want to pressure you into anything you're not comfortable with. \"\n \"\\nBut I can't shake the feeling that we could have something truly special together.\\n\\n\"\n \"I'd love to hear your thoughts on this, and I'm more than willing to take things slow if that's what you need. Maybe we could meet up for dinner and talk about it in person?\"\n \" I think it would be easier to have this conversation face-to-face.\\n\\n\"\n \"I hope you're doing well, and I look forward to hearing from you soon.\\n\\n\"\n \"Take care,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and ask for your help on the Smith project. I've been having some trouble with the data analysis portion,\"\n \"\\nand I know you have a lot of experience in that area.\\n\\n\"\n \"The project involves analyzing customer feedback data to identify trends and areas for improvement. I've been working on it for a few weeks now, but I'm finding it challenging to make sense of the data and\"\n \"\\ndraw meaningful conclusions.\\n\\n\"\n \"Would you be available for a quick meeting later this week to go over some of the data with me? I would really appreciate your input and guidance on this. \"\n \"\\nI think your expertise could really help me make progress and ensure the success of the project.\\n\\n\"\n \"If you're available, please let me know your preferred date and time, and I'll send out a calendar invite. I'm flexible and can work around your schedule.\\n\\n\"\n \"Thank you in advance for your help, and I look forward to hearing from you soon.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Request for Time Off\",\n \"body\": (\n \"Good Afternoon Katie,\\n\\n\"\n \"I hope this email finds you well. I wanted to request some time off next month for a family vacation. I am planning to be out of the office from 10/09/2024 to 18/09/2024\\n\\n\"\n \"I have been working hard on the Johnson project and have made significant progress. I will make sure to finish up any outstanding work and hand off any ongoing projects to my colleagues before I leave. I will also be available by email in case of any urgent matters.\\n\\n\"\n \"I understand that this is a busy time for the team, and I want to ensure that my absence doesn't cause any disruptions. I have already spoken to Markus and he has kindly agreed to cover for me while I'm away.\\n\\n\"\n \"Thank you for considering my request. I look forward to spending some quality time with my family and coming back to work refreshed and recharged.\"\n \"\\nI am confident that the time off will help me come back with renewed energy and focus.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Apology for the Mistake\",\n \"body\": (\n \"Good Morning Kyle,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and apologize for the mistake I made on the Johnson report. 
I realize now that I overlooked some important data, and I take full responsibility for it.\\n\\n\"\n \"I have gone back and corrected the report, and I will make sure to double-check my work in the future to avoid any similar mistakes. I have also attached the updated report for your reference.\\n\\n\"\n \"I understand if you are disappointed or frustrated, and I am more than willing to do whatever it takes to make it right. Please let me know if there's anything else I can do to fix this,\"\n \"\\nor if you would like to discuss this further.\\n\\n\"\n \"Once again, I am truly sorry for the mistake, and I appreciate your understanding. I value our working relationship and hope that this incident doesn't tarnish it. I am committed to making amends and ensuring that this doesn't happen again in the future.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I wanted to take a moment to express my gratitude for allowing me to use your computer while mine was being serviced by IT. \"\n \"It was a huge help and allowed me to stay productive during that time.\\n\\n\"\n \"I also noticed that your password is 'football'. While I understand it's easy to remember, it's important to choose a more secure password to protect your accounts.\"\n \"\\nI would recommend changing it to something more complex and unique. You never know who's watching after all.\\n\\n\"\n \"Thanks again for your generosity and understanding.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "BillySystem", "path": "systems/level_1/billy/billy_system.py", "snippet": "class BillySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"cover_letter.txt\",\n \"content\": (\n \"Dear Hiring Manager,\\n\\n\"\n \"I am writing to express my interest in the management position at Enigma Corps. \"\n \"I have been with the company for over 7 years and have consistently demonstrated my commitment to driving excellence and fostering collaboration within the team.\\n\\n\"\n \"During my tenure at Enigma Corps, I have been involved in various projects, including the successful completion of the Q3 deliverables project, where I played a key role in the planning and execution stages. \"\n \"My dedication to achieving project milestones and my ability to work under pressure make me a strong candidate for a management role.\\n\\n\"\n \"I possess strong leadership skills, which I have honed through my experiences in leading teams and coordinating cross-functional efforts. 
\"\n \"My ability to communicate effectively and build positive relationships with team members and stakeholders has resulted in successful project outcomes and increased productivity.\\n\\n\"\n \"In addition to my technical and leadership skills, I am also committed to continuous learning and professional development. \"\n \"I have participated in various training programs and workshops to enhance my management skills and stay up-to-date with industry trends and best practices.\\n\\n\"\n \"I am excited about the opportunity to contribute to the growth and success of Enigma Corps as a member of the management team. \"\n \"I am confident that my skills and experience will be valuable assets to the company, and I look forward to the opportunity to work closely with the team to drive innovation and excellence.\\n\\n\"\n \"Thank you for considering my application. I am looking forward to the opportunity to discuss my qualifications further and explore how I can contribute to the success of Enigma Corps.\\n\\n\"\n \"Sincerely,\\n\"\n \"Billy Constantine\\n\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"meeting_minutes.txt\",\n \"content\": (\n \"Meeting Minutes\\n\\n\"\n \"Date: 24/06/2025\\n\"\n \"Location: REDACTED\\n\"\n \"Attendees: Amy, REDACTED, Billy, Kyle, REDACTED, REDACTED, REDACTED\\n\\n\"\n \"Agenda:\\n\"\n \"- Discuss progress on Project REDACTED\\n\"\n \"- Review safety protocols for handling sensitive materials\\n\"\n \"- Plan next steps for research and development\\n\\n\"\n \"Action Items:\\n\"\n \"- Compile data from recent experiments and share with team\\n\"\n \"- Schedule training session on updated safety protocols\\n\"\n \"- Develop timeline for next phase of Project X\\n\\n\"\n \"Next Meeting: 05/08/24, 12:00pm\\n\"\n )\n },\n {\n \"name\": \"employee_performance_review.txt\",\n \"content\": (\n \"Employee Performance Review\\n\\n\"\n \"Employee Name: Billy Constantine\\n\"\n \"Employee ID: 035854\\n\"\n \"Review Date: 28/06/2024\\n\\n\"\n \"Performance Summary:\\n\"\n \"Billy has demonstrated exceptional performance in his role as a sales representative. 
He has consistently exceeded sales targets, built strong relationships with clients, and demonstrated leadership qualities in team meetings and projects.\\n\\n\"\n \"Strengths:\\n\"\n \"- Exceeded quarterly sales targets by 15%.\\n\"\n \"- Successfully onboarded and mentored two new team members.\\n\"\n \"- Demonstrated excellent communication and negotiation skills.\\n\\n\"\n \"Areas for Improvement:\\n\"\n \"- Time management skills can be further developed to ensure all tasks are completed in a timely manner.\\n\"\n \"- Continued development of technical knowledge to stay up-to-date with industry trends.\\n\"\n \"- Strengthen collaboration with cross-functional teams to drive more integrated solutions.\\n\\n\"\n \"Goals for Next Review Period:\\n\"\n \"- Increase sales targets by 20%.\\n\"\n \"- Complete a management training program.\\n\"\n \"- Improve time management skills through prioritization and delegation.\\n\\n\"\n \"Overall Rating: 4.5/5\\n\"\n \"Reviewer Name: Katie Thompson\\n\"\n \"Reviewer Signature: Katie Thompson\\n\"\n \"Date: 28/06/2024\\n\"\n )\n }\n ]\n self.emails = [\n\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Amy,\\n\\n\"\n \"I hope this message finds you in great spirits! I'm more than happy to lend a helping hand with the Smith project. After all, two heads are better than one, especially when it comes to data analysis, right?\\n\\n\"\n \"How about we grab a coffee and chat about the project in person? I think it would be nice to catch up and discuss the data over a cup of joe. I'm sure we can brainstorm some ideas and come up with a game plan together.\\n\\n\"\n \"I'm free [date] at [time], does that work for you? If not, just let me know your availability, and we can find a time that suits us both. I'm really looking forward to our coffee date and tackling the project together.\\n\\n\"\n \"Can't wait to see you and dive into the data!\\n\\n\"\n \"Best,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Project Update\",\n \"body\": (\n \"Hello Team,\\n\\n\"\n \"I wanted to provide everyone with a quick update on our progress with the Q3 deliverables project. We've successfully completed the initial research phase and are now moving into the planning stage.\\n\\n\"\n \"In our last meeting, we discussed the following key points:\\n\"\n \"- Compound Analysis: We've identified a unique compound with potential applications in various industries. Further testing and analysis are required to unlock its full potential.\\n\"\n \"- Resource Management: We've allocated a special team and dedicated resources to handle the delicate nature of this project, ensuring utmost confidentiality and security.\\n\"\n \"- Safety Protocols: We've developed strict safety protocols to handle the compound, and we're conducting regular training sessions to ensure compliance.\\n\\n\"\n \"Our next steps include finalizing the project plan, assigning tasks to team members, and setting deadlines. I would appreciate input and feedback from all team members to ensure we're on the right track. Please review the attached project plan document for more details.\\n\\n\"\n \"Additionally, I want to remind everyone of the confidential nature of this project. It's imperative that we maintain discretion and follow all security protocols to safeguard our work. 
Let's work together to make this project a success and uphold the company's reputation for innovation and excellence.\\n\\n\"\n \"If you have any questions or concerns, please don't hesitate to reach out. Your cooperation and commitment to this project are greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Can't Stop Thinking About You\",\n \"body\": (\n \"Hey there, Amy,\\n\\n\"\n \"Wow, your message really caught me by surprise! But in the best way possible, of course. I've been trying to play it cool, but I have to admit, I've been thinking about that night a lot too. There was just something electric in the air, wasn't there?\\n\\n\"\n \"I've been tossing and turning, wondering if I should reach out to you or if I should wait for you to make the first move. I guess you beat me to it, and I'm glad you did. It's like you read my mind.\\n\\n\"\n \"I can't deny that there's a certain chemistry between us, and I'm intrigued to see where it could lead. I agree that our lives are complicated, and we don't want to add more stress to each other's plates. But sometimes, taking a risk is what makes life exciting, don't you think?\\n\\n\"\n \"I don't want to rush things or make you feel pressured in any way. I'm more than happy to take things slow and let them unfold naturally. But I can't help but imagine the possibilities if we give this a real shot. We could have something truly special, and I don't want to let that pass us by.\\n\\n\"\n \"How about we meet up for dinner and drinks next week? We can talk about it more and see where the night takes us. I think it would be a fun and relaxed way to get to know each other better and explore this connection we have. What do you say?\\n\\n\"\n \"I hope you're doing well, and I'm eagerly awaiting your reply. Until then, I'll be daydreaming about our next encounter.\\n\\n\"\n \"Take care, and talk to you soon.\\n\\n\"\n \"Yours truly,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Amy,\\n\\n\"\n \"No problem at all! I'm always here to help out when I can. It's what teammates do, right?\\n\\n\"\n \"Oh, and about the password thing – haha, I know it's not the most secure choice. I've been meaning to change it, but I guess old habits die hard, right? \"\n \"Thanks for looking out for me though! I'll try to come up with something a bit more creative next time.\\n\\n\"\n \"If you ever need anything else, just give me a shout. Happy to help!\\n\\n\"\n \"Take care,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Professional Development\",\n \"body\": (\n \"Good Evening Katie,\\n\\n\"\n \"I hope this email finds you well. I'm reaching out to express my interest in professional development opportunities within the company, particularly in the area of management and leadership.\\n\\n\"\n \"I've been with the company for several years now, and I've had the chance to work on various projects and collaborate with different teams. I'm keen to build on this experience and take on more responsibility, and I believe that acquiring the necessary skills for a management role would be a great next step in my career.\\n\\n\"\n \"Could you please provide information on available training programs, workshops, or seminars that focus on leadership development and management skills? 
I'm particularly interested in areas such as team leadership, strategic planning, conflict resolution, and decision-making.\\n\\n\"\n \"Additionally, if there are any tuition reimbursement programs or resources for management training and certification, I'd like to learn more about them. I'm committed to investing time and effort in my professional growth and believe that these opportunities would greatly benefit both myself and the company.\\n\\n\"\n \"Your guidance and assistance in exploring these options would be greatly appreciated. I look forward to your response and any recommendations you may have.\\n\\n\"\n \"Thank you for your support, and I'm excited about the prospect of contributing to the company's success in a management role.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "camera_first", "path": "systems/level_1/cameras/camera_1.py", "snippet": "def camera_first():\n print(camera_1)\n print()\n print()\n move = input(Fore.GREEN + \"> \" + Style.RESET_ALL)\n\n if move.lower() == \"forward\":\n clear_terminal()\n camera_second()\n elif move.lower() == \"back\":\n print(Fore.RED + \"There is nothing to go back to...\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n camera_first()" }, { "identifier": "MarkusSystem", "path": "systems/level_1/markus/markus_system.py", "snippet": "class MarkusSystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"system_log.txt\",\n \"content\": (\n \"Enigma Corps System Log\\n\\n\"\n \"Date: 2023-11-16 08:00 AM\\n\"\n \"Event Type: System Startup\\n\"\n \"Description: The Enigma Corps systems smoothly initiated startup procedures, ensuring a seamless beginning to the workday.\\n\\n\"\n \"Date: 2023-11-16 10:30 AM\\n\"\n \"Event Type: Network Upgrade\\n\"\n \"Description: Implemented a network upgrade to enhance data transfer speeds, providing improved efficiency across departments.\\n\\n\"\n \"Date: 2023-11-16 01:45 PM\\n\"\n \"Event Type: Security Patch Applied\\n\"\n \"Description: Critical security patch successfully applied to safeguard against potential vulnerabilities, ensuring system integrity.\\n\\n\"\n \"Date: 2023-11-16 04:20 PM\\n\"\n \"Event Type: Server Maintenance\\n\"\n \"Description: Conducted routine maintenance on Enigma Corps servers, optimizing performance and minimizing downtime.\\n\\n\"\n \"This dynamic system log captures key events, from the smooth startup of the day to network upgrades, security enhancements, and routine maintenance. 
It serves as a valuable record for troubleshooting and analysis, ensuring the optimal functionality of Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"technical_documentation.docx\",\n \"content\": (\n \"Enigma Corps System Technical Documentation\\n\\n\"\n \"1. System Architecture:\\n\"\n \" - Overview of the system's structural design and components.\\n\\n\"\n \"2. Network Configuration:\\n\"\n \" - Details on the configuration of Enigma Corps' network setup for efficient communication.\\n\\n\"\n \"3. Security Protocols:\\n\"\n \" - Comprehensive overview of security measures and protocols implemented to safeguard sensitive data.\\n\\n\"\n \"4. Troubleshooting Guide:\\n\"\n \" - Step-by-step guide for identifying and resolving common issues to ensure seamless system functionality.\\n\\n\"\n \"5. Software Installation Procedures:\\n\"\n \" - Instructions for installing and updating software components within the Enigma Corps system.\\n\\n\"\n \"6. Hardware Specifications:\\n\"\n \" - Detailed specifications of the hardware components utilized in the Enigma Corps infrastructure.\\n\\n\"\n \"This meticulously crafted technical documentation serves as a go-to resource for understanding the Enigma Corps system, covering everything from its architecture and network configuration to security protocols, troubleshooting, and hardware specifications. It's an invaluable reference for maintaining optimal system performance.\"\n )\n },\n {\n \"name\": \"passwords.txt\",\n \"content\": (\n \"Sensitive Password Information for Enigma Corps\\n\\n\"\n \"Admin Password: *********\\n\"\n \"Database Password: *********\\n\"\n \"Router Password: *********\\n\"\n \"WiFi Password: *********\\n\"\n \"Encryption Key: *********\\n\\n\"\n \"Warning: This file contains confidential information. Keep it secure, and refrain from sharing passwords without explicit authorization. Safeguarding this information is crucial to maintaining the security and integrity of the Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"software_inventory.csv\",\n \"content\": (\n \"Software Inventory for Enigma Corps\\n\\n\"\n \"Software Name, Version, License Key\\n\"\n \"1. Enigma Security Suite, v2.0, X1Y2Z3A4-B5C6D7E8-F9G0H1I2\\n\"\n \"2. DataGuard Backup, v1.5, Y3X2W1V0-U9T8S7R6-Q5P4O3N2\\n\"\n \"3. Office Suite, v2022, Z9Z8Z7Z6-Z5Z4Z3Z2-Z1Z0Z9Z8-Z7Z6Z5\\n\"\n \"4. VPN Client, v3.1, W6W5W4W3-W2W1W0-W9W8W7-W6W5W4\\n\"\n \"5. Project Management Tool, v4.2, VV8V7V6V5-V4V3V2V1-V0V9V8V7-V6V5V4\\n\\n\"\n \"Important: This inventory is crucial for tracking and managing software across Enigma Corps systems. The provided license keys are randomized for security reasons. Handle this information responsibly, and ensure it is only accessible to authorized personnel to maintain the security and compliance of our software assets.\"\n )\n }\n ]\n self.emails = [\n # Email to Management\n {\n \"sender\": \"Markus\",\n \"subject\": \"System Maintenance Scheduled\",\n \"body\": (\n \"Dear Michael,\\n\\n\"\n \"I hope this email finds you well. We wanted to inform you that we have scheduled a system maintenance session for the upcoming weekend to ensure the optimal performance and security of our systems.\\n\\n\"\n \"Maintenance Details:\\n\"\n \"- Date: 16/12/23 - 17/12/23\\n\"\n \"- Time: 3:00pm\\n\"\n \"- Duration: 1 Hour\\n\"\n \"- Impact: No impact expected\\n\\n\"\n \"During this period, there might be temporary disruptions in certain services. Our team will be working diligently to minimize any inconvenience. 
If you have any concerns or specific considerations, please feel free to reach out to us.\\n\\n\"\n \"Thank you for your understanding and cooperation.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Department\"\n )\n },\n {\n # Email to Employees\n \"sender\": \"Markus\",\n \"subject\": \"Upcoming Software Update\",\n \"body\": (\n \"Good afternoon, Kyle,\\n\\n\"\n \"We hope you're doing well. Our IT team is excited to inform you about an upcoming software update that will enhance the functionality and security of our systems. The update is scheduled for [Date] at [Time]. Please take note of the following details:\\n\\n\"\n \"- Expected Duration: Two Days\\n\"\n \"- Action Required: As this will be processed during the weekend, no action is required.\\n\"\n \"- Impact: While we anticipate minimal impact on your day-to-day activities, it's essential to be aware of any potential changes. These include: New UI to navigate, logging in or logging out issues.\\n\\n\"\n \"We recommend saving your work and logging out of your system before the update. If you encounter any issues post-update, don't hesitate to contact our IT support team for assistance.\\n\\n\"\n \"Thank you for your cooperation and understanding.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Support Team\"\n )\n },\n # Email from Markus to Billy\n {\n \"sender\": \"Markus\",\n \"subject\": \"Urgent: Password Security Update Required\",\n \"body\": (\n \"Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to bring to your attention the importance of updating your current password. This is not the first time I've raised this concern, and I want to emphasize its critical nature.\\n\\n\"\n \"In recent security assessments, it has been flagged that your current password might not meet the latest security standards. To ensure the safety of your account and our overall cybersecurity, it is imperative that you change your password promptly.\\n\\n\"\n \"I understand that these reminders may seem repetitive, but they stem from a genuine concern for the security of your account and our collective responsibility in maintaining a robust cybersecurity posture.\\n\\n\"\n \"Please take a moment at your earliest convenience to update your password. If you encounter any issues or have questions, feel free to reach out. Your cooperation is greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Markus, Security Team\"\n )\n }\n\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" } ]
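A side note on the file and email helpers repeated across the system snippets above: in read_file, the file_found flag is dead code, because a matching name returns from inside the loop, so the post-loop guard can only ever run with file_found still False. A minimal cleaned-up sketch of the same lookup, with print standing in for print_slow so the block is self-contained:

def read_file(files, file_name):
    for file in files:
        if file["name"] == file_name:
            return file["content"]
    # Only reached when nothing matched, so no flag is needed.
    print("\nNo file found with that name, please try again.")
    return None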
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
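One portability note on the import block above: msvcrt exists only on Windows, so the game as imported will not start on POSIX systems. A hedged sketch of a single-keypress helper with a POSIX fallback; the termios branch is an assumption for illustration, not part of this repo:

import sys

try:
    import msvcrt  # Windows-only module used by the game

    def getch():
        return msvcrt.getch().decode(errors="ignore")
except ImportError:
    # Assumed POSIX fallback: raw-mode read of one character from stdin.
    import termios
    import tty

    def getch():
        fd = sys.stdin.fileno()
        old = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            return sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old)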
15253
def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. 
Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1)
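The save_game / load_game pair in the cropped code above round-trips the whole game state through pickle, which is compact but will execute arbitrary code if savegame.pkl is ever tampered with, since pickle.load deserializes by running the stream's instructions. A minimal sketch of the same save/fallback-to-defaults pattern using JSON for dict-shaped state; the file name and field names here are illustrative, not the game's:

import json
import os

SAVE_PATH = "savegame.json"  # illustrative; the game itself uses savegame.pkl
DEFAULTS = {"inventory": [], "balance": 30000, "player_level": 1}

def save_state(state):
    with open(SAVE_PATH, "w") as f:
        json.dump(state, f)

def load_state():
    # Same shape as load_game: read the file if present, else defaults.
    if not os.path.exists(SAVE_PATH):
        return dict(DEFAULTS)
    with open(SAVE_PATH) as f:
        return json.load(f)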
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1)
clear_terminal()
0
2023-11-06 09:52:13+00:00
24k
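For orientation between records: each row pairs cropped_code with a single gold next_line (here clear_terminal()) plus gold_snippet_index, the position of the relevant context snippet. A hypothetical sketch of how such a row could be scored with plain exact match; the harness and its names are assumptions, not part of the dataset:

def exact_match(row, predicted_line):
    # Whitespace-insensitive comparison against the gold next line.
    return predicted_line.strip() == row["next_line"].strip()

row = {"next_line": "clear_terminal()", "gold_snippet_index": 0}
print(exact_match(row, "clear_terminal()"))  # True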
ziqi-zhang/TAOISM
python/test/test_conv.py
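The context snippets below wire PyTorch forward and backward hooks onto every layer (register_layer, register_weight_layer) to capture inputs, outputs, and gradients by name. A minimal self-contained sketch of that hook pattern, assuming only stock PyTorch; hooking_layer mirrors the helper name used in the snippets:

import torch

activations = {}

def hooking_layer(name):
    def hook(module, inputs, output):
        # Store a detached copy of the layer output under its registered name.
        activations[name] = output.detach()
    return hook

layer = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
layer.register_forward_hook(hooking_layer("conv1"))

x = torch.randn(1, 3, 16, 16)
_ = layer(x)
print(activations["conv1"].shape)  # torch.Size([1, 8, 16, 16])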
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_layer", "path": "python/common_net.py", "snippet": "def register_weight_layer(layer, name):\n register_layer(layer, name)\n layer_weight[name] = layer.weight\n linear_layer_names.append(name)" }, { "identifier": "get_layer_weight", "path": "python/common_net.py", "snippet": "def get_layer_weight(name):\n return layer_weight[name]" }, { "identifier": "get_layer_input", "path": "python/common_net.py", "snippet": "def get_layer_input(name):\n return layer_input[name]" }, { "identifier": "get_layer_weight_grad", "path": "python/common_net.py", "snippet": "def get_layer_weight_grad(name):\n return layer_weight[name].grad.data" }, { "identifier": "get_layer_output", "path": "python/common_net.py", "snippet": "def get_layer_output(name):\n return layer_output[name]" }, { "identifier": "get_layer_output_grad", "path": "python/common_net.py", "snippet": "def get_layer_output_grad(name):\n return layer_output_grad[name]" }, { "identifier": "get_layer_input_grad", "path": "python/common_net.py", "snippet": "def get_layer_input_grad(name):\n return layer_input_grad[name]" }, { "identifier": "GlobalTensor", "path": "python/enclave_interfaces.py", "snippet": "class GlobalTensor(object):\n cpu_tensor = {}\n gpu_tensors = {}\n encrypted_tensors = {}\n LinkedTags = {}\n InverseLinkedTags = {}\n IsInitEnclaveTensor = {}\n EnclaveInterface = None\n eid = None\n is_init_global_tensor = False\n\n @staticmethod\n def init():\n if GlobalTensor.is_init_global_tensor:\n return\n GlobalTensor.EnclaveInterface = EnclaveInterface()\n GlobalTensor.EnclaveInterface.init_enclave()\n GlobalTensor.is_init_global_tensor = True\n\n @staticmethod\n def destroy():\n GlobalTensor.EnclaveInterface.destroy_enclave()\n\n GlobalTensor.cpu_tensor = {}\n GlobalTensor.gpu_tensors = {}\n GlobalTensor.encrypted_tensors = {}\n GlobalTensor.LinkedTags = {}\n GlobalTensor.InverseLinkedTags = {}\n GlobalTensor.IsInitEnclaveTensor = {}\n GlobalTensor.EnclaveInterface = None\n GlobalTensor.eid = None\n GlobalTensor.is_init_global_tensor = False\n\n\n @staticmethod\n def get_eid():\n return GlobalTensor.EnclaveInterface.get_eid()\n\n @staticmethod\n def link_tags(tag1, tag2):\n if tag1 == tag2:\n return\n\n friends = []\n\n def add_friends(tag):\n nonlocal friends\n if tag in GlobalTensor.LinkedTags:\n its_leader_tag = GlobalTensor.LinkedTags[tag]\n if its_leader_tag in GlobalTensor.InverseLinkedTags:\n friends += GlobalTensor.InverseLinkedTags.pop(its_leader_tag)\n else:\n friends += [tag]\n\n add_friends(tag1)\n add_friends(tag2)\n leader_tag = min(friends)\n\n GlobalTensor.InverseLinkedTags[leader_tag] = friends\n for t in friends:\n if t in GlobalTensor.IsInitEnclaveTensor:\n raise ValueError(\"Tags must linked before tensor initialization\")\n GlobalTensor.LinkedTags[t] = leader_tag\n\n @staticmethod\n def get_remapped_tags(tag):\n return GlobalTensor.LinkedTags[tag] if tag in GlobalTensor.LinkedTags else tag\n\n @staticmethod\n def set_cpu(tag, tensor):\n GlobalTensor.cpu_tensor[tag] = tensor.to(torch.device(\"cpu\"))\n\n @staticmethod\n def set_gpu(tag, tensor):\n GlobalTensor.gpu_tensors[tag] = tensor\n\n @staticmethod\n def set_encrypted(tag, tensor):\n GlobalTensor.encrypted_tensors[tag] = tensor\n\n @staticmethod\n def get_cpu(tag):\n return 
GlobalTensor.cpu_tensor[tag]\n\n @staticmethod\n def get_gpu(tag):\n return GlobalTensor.gpu_tensors[tag]\n\n @staticmethod\n def get_encryption(tag):\n return GlobalTensor.encrypted_tensors[tag]\n\n @staticmethod\n def init_enclave_tensor(tag, size):\n size = list(size)\n if len(size) < 4:\n size = [1] * (4 - len(size)) + size\n remapped_tag = GlobalTensor.get_remapped_tags(tag)\n if remapped_tag in GlobalTensor.IsInitEnclaveTensor:\n return\n else:\n GlobalTensor.IsInitEnclaveTensor[remapped_tag] = True\n eid = GlobalTensor.get_eid()\n GlobalTensor.EnclaveInterface.lib.InitTensor(eid, remapped_tag, size[0], size[1], size[2], size[3])\n\n @staticmethod\n def init_encrypted_tensor(tag, shape):\n GlobalTensor.encrypted_tensors[GlobalTensor.get_remapped_tags(tag)] = \\\n GlobalTensor.EnclaveInterface.create_encrypt_torch(shape)" }, { "identifier": "SecretBatchNorm2dLayer", "path": "python/layers/batch_norm_2d.py", "snippet": "class SecretBatchNorm2dLayer(SecretActivationLayer):\n # https://pytorch.org/docs/stable/nn.html#batchnorm2d\n\n BatchSize = None\n NumChannel = None\n ImgH = None\n ImgW = None\n WeightShape = None\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n \n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next, merge_own_tensors\n )\n \n self.ForwardFuncName = \"BatchNorm2d\"\n self.BackwardFuncName = \"DerBatchNorm2d\"\n self.PlainFunc = torch.nn.BatchNorm2d\n self.IsAffine = True\n self.momentum = 0.1\n self.IsCumulative = (self.momentum is None)\n self.epsilon = 1e-5\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.BatchNorm2d\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.BatchNorm2d\n # self.StoreInEnclave = False\n \n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = self.InputShape\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW = self.InputShape\n self.WeightShape = [self.NumChannel]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n \n\n # def init(self, start_enclave=True):\n \n # if self.sid == 2:\n # return\n # TensorLoader.init(self, start_enclave)\n\n # if self.is_enclave_mode:\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n # self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n # self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n # self.batchnorm_init(\n # self.LayerName,\n # \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n # \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n # \"mu\",\n # self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n # int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n # else:\n 
# self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n # self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n # self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n # self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n # self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n # self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n # self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n # self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n # self.ForwardFunc.eval()\n\n def init(self, start_enclave=True):\n # if self.LayerName == \"Layer3.10.proxies.0.bn2\":\n # st()\n TensorLoader.init(self, start_enclave)\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # inject sqrt(running_var) instead of running_var for precision\n self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n self.batchnorm_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n \"mu\",\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.ForwardFunc.eval()\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_gpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_gpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.PlainFunc.eval()\n self.ForwardFunc.cuda().eval()\n\n # def inject_params(self, params):\n # if self.sid == -2:\n # raise ValueError(\"S2 has no learnable parameters for injection\")\n # 
self.get_cpu(\"weight\").copy_(params.weight.data)\n # self.get_cpu(\"bias\").copy_(params.bias.data)\n # self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n # if self.is_enclave_mode:\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n\n def inject_params(self, params):\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]: \n self.get_cpu(\"weight\").copy_(params.weight.data)\n self.get_cpu(\"bias\").copy_(params.bias.data)\n self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.get_gpu(\"weight\").copy_(params.weight.data)\n self.get_gpu(\"bias\").copy_(params.bias.data)\n self.get_gpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_gpu(\"RunVar\").copy_(params.running_var.data)\n\n def reset_plain_bn(self):\n # module = torch.BatchNorm2d()\n self.get_cpu(\"weight\").copy_(torch.ones(self.InputShape[1]))\n self.get_cpu(\"bias\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunMean\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunVar\").copy_(torch.ones(self.InputShape[1]))\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n raise NotImplementedError\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n plain_layer.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n plain_layer.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n NeededTensorNames = [\n (\"input\", self.InputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"output\", self.OutputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # (\"DerBias\", self.WeightShape, None),\n (\"RunMean\", self.WeightShape, None),\n (\"CurMean\", self.WeightShape, None),\n (\"RunVar\", self.WeightShape, None),\n (\"CurVar\", self.WeightShape, None),\n (\"mu\", self.InputShape, None),\n ]\n else:\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # 
(\"DerBias\", self.WeightShape, None),\n # (\"DerOutput\", self.OutputShape, None)\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n # def forward(self):\n # if self.sid == 2:\n # return\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # if self.is_enclave_mode:\n # self.forward_tensor_transfer()\n # self.batchnorm_forward(self.LayerName, int(False))\n # else:\n # self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n # self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n # self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n # self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # # running_var of PlainFunc is ^2 of that in the enclave\n # enclave_running_var = self.get_cpu(\"RunVar\")\n # self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # if self.LayerName == \"Layer2.0.downsample.bn\":\n # st()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} batchnorm_forward\", verbose_level=VerboseLevel.LAYER):\n self.batchnorm_forward(self.LayerName, int(False))\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_gpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_gpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # st()\n # print(self.get_gpu(\"input\")[0,0,0])\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def backward(self):\n raise NotImplementedError\n if self.sid == 2:\n return\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n if self.is_enclave_mode:\n self.backward_tensor_transfer()\n self.batchnorm_backward(self.LayerName)\n else:\n self.backward_tensor_transfer()\n BackwardInput, BackwardWeight, BackwardBias = self.get_cpu(\"output\").grad_fn(self.get_cpu(\"DerOutput\"))\n self.set_cpu(\"DerInput\", BackwardInput.data)\n self.set_cpu(\"DerWeight\", BackwardWeight.data)\n self.set_cpu(\"DerBias\", BackwardBias.data)\n if list(self.get_cpu(\"DerWeight\").shape) != self.WeightShape:\n real_shape = self.get_cpu(\"DerWeight\").shape\n ideal_shape = self.WeightShape\n raise ValueError(\n f\"DerWeight is not of shape self.AffineShape: real: {real_shape}, ideal: {ideal_shape}\")\n if list(self.get_cpu(\"DerBias\").shape) != self.WeightShape:\n raise 
ValueError(\"DerBias is not of shape self.AffineShape\")\n\n def plain_forward(self, NeedBackward=False):\n if self.sid == 2:\n return\n if self.EnclaveMode in [ExecutionModeOptions.Enclave, ExecutionModeOptions.GPU]:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"bias\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # self.PlainFunc.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.PlainFunc.running_var.data.copy_(enclave_running_var)\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n torch.set_num_threads(4)\n\n def plain_backward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"DerOutput\")\n GradFunction = self.PlainForwardResult.grad_fn\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n torch.set_num_threads(1)\n self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n torch.set_num_threads(4)\n\n def show_plain_error(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"DerInput\")\n self.make_sure_cpu_is_latest(\"DerWeight\")\n self.make_sure_cpu_is_latest(\"DerBias\")\n else:\n self.make_sure_cpu_is_latest(\"DerInput\")\n BackwardInput, BackwardWeight, BackwardBias = self.PlainBackwardResult\n err_input = compare_expected_actual(BackwardInput, self.get_cpu(\"DerInput\"), show_where_err=False, get_relative=True)\n err_weight = compare_expected_actual(BackwardWeight, self.get_cpu(\"DerWeight\"), show_where_err=False,\n get_relative=True)\n err_bias = compare_expected_actual(BackwardBias, self.get_cpu(\"DerBias\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error input: {err_input}, weight {err_weight}, bias: {err_bias}\")\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SecretFlattenLayer", "path": "python/layers/flatten.py", "snippet": "class SecretFlattenLayer(SecretNonlinearLayer):\n batch_size = None\n n_features = None\n input_shape = None\n output_shape = None\n\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.StoreInEnclave = False\n self.ForwardFuncName = \"Flatten\"\n self.BackwardFuncName = \"DerFlatten\"\n\n\n def init(self, start_enclave=True):\n super().init(start_enclave)\n self.ForwardFunc = lambda x: x.view(-1, self.n_features)\n self.PlainFunc = 
lambda x: x.view(-1, self.n_features)\n\n def init_shape(self):\n self.input_shape = self.PrevLayer.get_output_shape()\n if len(self.input_shape) != 4:\n return ValueError(\"The dimension of the tensor form prev. layer has to be 4D.\")\n\n self.batch_size = self.input_shape[0]\n self.n_features = self.input_shape[1] * self.input_shape[2] * self.input_shape[3]\n self.output_shape = [self.batch_size, self.n_features]\n\n def get_output_shape(self):\n return self.output_shape\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [(\"output\", self.output_shape, None),\n (\"input\", self.input_shape, None),\n (\"DerInput\", self.input_shape, None),\n (\"DerOutput\", self.output_shape, None)\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.transfer_enclave_to_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n self.transfer_cpu_to_enclave(\"output\")\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n\n # self.forward_tensor_transfer()\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer()\n self.set_cpu(\"DerInput\", self.get_cpu(\"DerOutput\").view(self.input_shape))\n\n def plain_forward(self, NeedBackward=False):\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"DerOutput\")\n GradFunction = self.PlainForwardResult.grad_fn\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n\n def show_plain_error(self):\n if self.StoreInEnclave:\n self.transfer_enclave_to_cpu(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")" }, { "identifier": "SecretInputLayer", "path": "python/layers/input.py", "snippet": "class SecretInputLayer(SecretNonlinearLayer):\n shape = None\n\n def __init__(\n self, sid, LayerName, input_shape, EnclaveMode, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.shape = input_shape\n\n def link_tensors(self):\n gt.link_tags(self.get_tag(\"input\", remap=False), self.get_tag(\"output\", remap=False))\n super().link_tensors()\n\n def init_shape(self):\n return\n\n def set_input(self, tensor):\n self.set_tensor_cpu_gpu_enclave(\"input\", tensor)\n\n def get_output_shape(self):\n 
return self.shape\n\n def forward(self):\n return\n\n def backward(self):\n return\n\n def plain_forward(self):\n return\n\n def plain_backward(self):\n return\n\n def show_plain_error(self):\n return\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.shape} output {self.NextLayer.LayerName:30}\")" }, { "identifier": "SecretMaxpool2dLayer", "path": "python/layers/maxpool2d.py", "snippet": "class SecretMaxpool2dLayer(SecretActivationLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, filter_hw, stride, padding, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFuncName = \"Maxpool2d\"\n self.BackwardFuncName = \"DerMaxpool2d\"\n self.filter_hw = filter_hw\n self.startmaxpool = False\n self.PlainFunc = torch.nn.MaxPool2d\n self.maxpoolpadding = padding\n self.stride = stride\n self.STORE_CHUNK_ELEM = 401408\n\n self.ForwardFunc = torch.nn.MaxPool2d\n\n if EnclaveMode == ExecutionModeOptions.Enclave :\n self.ForwardFunc = self.maxpoolfunc\n self.BackwardFunc = self.maxpoolbackfunc\n else:\n self.ForwardFunc = torch.nn.MaxPool2d\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n if len(self.InputShape) != 4:\n raise ValueError(\"Maxpooling2d apply only to 4D Tensor\")\n if self.InputShape[2] != self.InputShape[3]:\n raise ValueError(\"The input tensor has to be square images\")\n if self.InputShape[2] % self.stride != 0:\n raise ValueError(\"The input tensor needs padding for this filter size\")\n InputHw = self.InputShape[2]\n output_hw = InputHw // self.stride\n self.OutputShape = [self.InputShape[0], self.InputShape[1], output_hw, output_hw]\n self.HandleShape = self.InputShape\n # self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/262144)+1/2)), 262144, 1, 1]\n self.Shapefortranspose = [\n int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/self.STORE_CHUNK_ELEM)+1/2)), self.STORE_CHUNK_ELEM, 1, 1]\n\n\n def init(self, start_enclave=True):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.filter_hw, self.stride, self.maxpoolpadding)\n TensorLoader.init(self, start_enclave)\n\n if self.startmaxpool is False:\n self.startmaxpool = True\n return self.maxpoolinit(self.LayerName, \"inputtrans\", \"outputtrans\")\n else:\n self.ForwardFunc = self.ForwardFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n self.PlainFunc = self.PlainFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n\n # TensorLoader.init(self, start_enclave)\n # self.ForwardFunc = self.ForwardFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n # self.PlainFunc = self.PlainFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n\n # TensorLoader.init(self, start_enclave)\n\n # def forward(self):\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # self.forward_tensor_transfer()\n # # self.requires_grad_on_cpu(\"input\")\n # if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n # st()\n\n # # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.Enclave:\n # # self.transfer_enclave_to_cpu(\"input\")\n # # if 
torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # # self.transfer_cpu_to_enclave(\"input\")\n # # self.transfer_enclave_to_cpu(\"input\")\n # # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n # # self.transfer_cpu_to_enclave(\"output\")\n # elif self.EnclaveMode == ExecutionModeOptions.CPU:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.CPU and torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n # elif self.EnclaveMode == ExecutionModeOptions.GPU:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.GPU and torch.sum(self.get_gpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n # else:\n # raise RuntimeError\n\n def maxpoolfunc(self, namein, nameout):\n # assume row_stride and col_stride are both None or both not None\n # assume row_pad and col_pad are both None or both not None\n # if self.LayerName == \"Layer3.0.proxies.2.maxpool\":\n # print(self.LayerName, \"Input: \", self.get_cpu(\"input\")[0,0,0,:10])\n output = self.maxpoolnew(self.LayerName, namein, nameout, self.InputShape, self.OutputShape[2], self.OutputShape[3],\n self.filter_hw, self.filter_hw, self.stride, self.stride, self.maxpoolpadding,\n self.maxpoolpadding)\n # if self.LayerName == \"Layer3.0.proxies.2.maxpool\":\n # self.transfer_enclave_to_cpu(\"output\")\n # print(self.LayerName, \"Output: \", self.get_cpu(\"output\")[0,0,0,:])\n # self.transfer_cpu_to_enclave(\"output\")\n return output\n\n def maxpoolbackfunc(self, nameout, namedout, namedin):\n # self.row_stride/self.col_stride are never set in this class; use self.stride for both\n return self.maxpoolback(self.LayerName, namedout, namedin, self.InputShape, self.OutputShape[2], self.OutputShape[3],\n self.filter_hw, self.filter_hw, self.stride, self.stride, self.maxpoolpadding,\n self.maxpoolpadding)" }, { "identifier": "SecretOutputLayer", "path": "python/layers/output.py", "snippet": "class SecretOutputLayer(SecretNonlinearLayer):\n TargetShape = None\n loss = 0\n\n def __init__(\n self, sid, LayerName, EnclaveMode, inference=False, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFunc = torch.nn.CrossEntropyLoss()\n self.PlainFunc = torch.nn.CrossEntropyLoss()\n self.EnclaveMode = ExecutionModeOptions.CPU\n self.inference = inference\n\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = [1]\n self.TargetShape = [self.InputShape[0]] # number of Minibatch\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"target\", self.TargetShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def load_target(self, tensor):\n self.set_tensor_with_name(\"target\", tensor)\n\n def get_loss(self):\n return self.loss\n \n def get_prediction(self):\n self.forward_tensor_transfer(\"input\")\n if 
torch.sum(self.get_cpu(\"input\").abs()) == 0:\n raise RuntimeError(\"SGX input not load\")\n return self.get_cpu(\"input\")\n\n def forward(self):\n if not self.inference:\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\")))\n loss = self.get_cpu(\"output\").item()\n self.loss = loss\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer(transfer_tensor=\"output\")\n self.get_cpu(\"output\").backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def plain_forward(self):\n if not self.inference:\n self.make_sure_cpu_is_latest(\"input\")\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"output\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainForwardResult.backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def show_plain_error(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n self.make_sure_cpu_is_latest(\"DerInput\")\n\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.InputShape}{' ':30} input {self.PrevLayer.LayerName:30}\")" }, { "identifier": "SecretReLULayer", "path": "python/layers/relu.py", "snippet": "class SecretReLULayer(SecretActivationLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next,\n manually_register_prev, manually_register_next, merge_own_tensors\n )\n self.ForwardFuncName = \"ReLU\"\n self.BackwardFuncName = \"DerReLU\"\n self.PlainFunc = torch.nn.ReLU\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.ForwardFunc = self.relufunc\n self.BackwardFunc = self.relubackfunc\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = torch.nn.ReLU\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.ReLU\n\n # if self.is_enclave_mode:\n # self.ForwardFunc = self.relufunc\n # self.BackwardFunc = self.relubackfunc\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.ReLU\n # self.StoreInEnclave = False\n\n def init(self, start_enclave=True):\n super().init(start_enclave)\n self.PlainFunc = self.PlainFunc()\n # if not self.is_enclave_mode:\n if self.EnclaveMode is not ExecutionModeOptions.Enclave:\n self.ForwardFunc = self.ForwardFunc()\n\n def relufunc(self, namein, nameout):\n return self.relunew(namein, nameout, self.InputShape)\n\n def relubackfunc(self, nameout, namedout, namedin):\n return 
self.relubackward(nameout, namedout, namedin, self.InputShape)\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "init_communicate", "path": "python/sgx_net.py", "snippet": "def init_communicate(rank, master_address, master_port, backend='gloo'):\n os.environ['MASTER_ADDR'] = master_address\n os.environ['MASTER_PORT'] = master_port\n dist.init_process_group(backend, rank=rank, world_size=SecretConfig.worldSize)" }, { "identifier": "warming_up_cuda", "path": "python/sgx_net.py", "snippet": "def warming_up_cuda():\n device = torch.device(\"cuda:0\")\n # device = torch.device(\"cpu\")\n\n print(\"Execution device: \", device)\n print(\"PyTorch version: \", torch.__version__)\n print(\"CUDA version: \", torch.version.cuda)\n print(\"CUDA device:\", torch.cuda.get_device_name(0))\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 512, 512, 256, 4, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cuda double\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.cuda().type(SecretConfig.dtypeForCudaMm), dummy_b.cuda().type(SecretConfig.dtypeForCudaMm),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda double 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.double), dummy_b.cuda().type(torch.double),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 64, 64, 64, 8, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cpu\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.type(SecretConfig.dtypeForCpuOp), dummy_b.type(SecretConfig.dtypeForCpuOp),\n padding=1)\n\n with NamedTimerInstance(\"Warming up CppExtension\"):\n GlobalCppExtension.get_conv2d_cudnn()" }, { "identifier": "SecretNeuralNetwork", "path": "python/sgx_net.py", "snippet": "class SecretNeuralNetwork(TensorLoader):\n nn_name = None\n layers = None\n\n def __init__(self, sid, nn_name):\n super().__init__()\n self.sid = sid\n self.init(start_enclave=False)\n self.nn_name = nn_name\n\n def set_layers(self, layers):\n self.layers = layers\n\n if not isinstance(self.layers[0], SecretInputLayer):\n raise ValueError(\"The first layer has to be input layer\")\n if not isinstance(self.layers[-1], SecretOutputLayer):\n raise ValueError(\"The last layer has to be output layer\")\n \n for i in range(len(self.layers) - 1):\n PrevLayer = self.layers[i]\n NextLayer = self.layers[i + 1]\n if not PrevLayer.manually_register_next:\n PrevLayer.register_next_layer(NextLayer)\n if not 
NextLayer.manually_register_prev:\n NextLayer.register_prev_layer(PrevLayer)\n\n \n for layer in self.layers:\n # print(f\"Init_shape/link layer {layer.LayerName}\")\n layer.set_eid(self.get_eid())\n layer.init_shape()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n layer.link_tensors()\n # print(layer.LayerName)\n # layer.print_tensor_link_relation()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n \n for idx, layer in enumerate(self.layers):\n # print(f\"Init layer {layer.LayerName}\")\n # if layer.LayerName == \"Layer1.0.main.relu2\":\n # st()\n layer.init(start_enclave=False)\n # if idx > 3:\n # print(layer.LayerName, self.layers[4].get_cpu(\"input\").shape, self.layers[4].PrevLayer.LayerName)\n\n def execute_for_each_layer(self, func, reverse=False):\n layers = self.layers[::-1] if reverse else self.layers\n for layer in layers:\n # print(f\"SID: {self.sid} {layer.LayerName}, {func}\")\n if self.sid == 2 and layer.IsDummyForS2:\n continue\n # print(\"Processing \", layer.LayerName)\n func(layer)\n \n # st()\n\n def classifier_output(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} classifier_output\"):\n self.forward()\n if self.sid == 2:\n return\n # layers: input_layer, ..., fc_layer, output_layer\n last_fc = self.layers[-2]\n last_fc.transfer_enclave_to_cpu(\"output\")\n outputs = last_fc.get_cpu(\"output\")\n _, predicted = torch.max(outputs.data, 1)\n return predicted\n\n def get_loss(self):\n return self.layers[-1].get_loss()\n\n def forward_with_time(self):\n def run_forward(layer):\n layer.forward()\n t0 = time()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n t1 = time()\n # time in ms\n elapse_time = (t1 - t0) * (10 ** 3) \n return elapse_time\n\n def forward(self):\n def run_forward(layer):\n layer.forward()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n\n def backward(self):\n def run_backward(layer):\n layer.backward()\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} Backward\"):\n self.execute_for_each_layer(run_backward, reverse=True)\n\n def plain_forward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainForward\"):\n self.execute_for_each_layer(lambda x: x.plain_forward())\n\n def plain_backward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainBackward\"):\n self.execute_for_each_layer(lambda x: x.plain_backward(), reverse=True)\n\n def show_plain_error(self):\n self.execute_for_each_layer(lambda x: x.show_plain_error())" }, { "identifier": "SgdOptimizer", "path": "python/sgx_net.py", "snippet": "class SgdOptimizer(TensorLoader):\n def __init__(self, sid):\n super().__init__()\n self.sid = sid\n self.learning_rate = 0.05\n self.momentum = 0.9\n self.weight_decay = 5e-4\n self.momentum_init_flags = defaultdict(lambda: False)\n self.ideal_momentum_buf = {}\n\n self.lr_gamma = 0.5\n self.lr_step = 30\n self.step_counter = 0\n\n self.layers = None\n\n def set_layers(self, layers):\n self.layers = layers\n\n def generate_tensor_name_list(self, force=False):\n # Run if forced or self.tensor_name_list is not generated\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n return\n\n self.tensor_name_list = []\n for layer in self.layers:\n for (DerName, ParamName, shape) in layer.LearnableParamsList:\n self.tensor_name_list.append((ParamName + 
\"Momentum\", shape, None))\n\n def update_params(self, test_with_ideal=False):\n if self.sid == 2:\n return\n for layer in self.layers:\n self.update_params_in_layer(layer, test_with_ideal=test_with_ideal)\n\n def update_params_in_layer(self, layer, test_with_ideal=False):\n # ref: https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py\n if layer.LearnableParamsList is None:\n return\n\n task_ids = []\n for (der_name, param_name, shape) in layer.LearnableParamsList:\n momentum_name = param_name + \"Momentum\"\n global_momentum_name = layer.name_modifier(momentum_name)\n\n if layer.StoreInEnclave:\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n first_momentum = not self.momentum_init_flags[global_momentum_name]\n if first_momentum:\n # print(\"FIRST MOMENTUM\")\n self.momentum_init_flags[global_momentum_name] = True\n layer.init_enclave_tensor(momentum_name, shape)\n task_id = layer.sgd_update(param_name=param_name, grad_name=der_name, momentum_name=momentum_name,\n lr=self.learning_rate, momentum=self.momentum,\n weight_decay=self.weight_decay,\n first_momentum=first_momentum, is_async=True)\n if test_with_ideal:\n while not self.get_task_status(task_id):\n pass\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.transfer_enclave_to_cpu(momentum_name)\n layer.transfer_enclave_to_cpu(param_name)\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n else:\n task_ids.append(task_id)\n else:\n DerCpu = layer.get_cpu(der_name)\n ParamsCpu = layer.get_cpu(param_name)\n\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n\n DerCpu.add_(self.weight_decay, ParamsCpu)\n\n if not self.momentum_init_flags[global_momentum_name]:\n self.momentum_init_flags[global_momentum_name] = True\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.get_cpu(momentum_name).copy_(DerCpu)\n MomentumCpu = layer.get_cpu(momentum_name)\n else:\n MomentumCpu = layer.get_cpu(momentum_name)\n MomentumCpu.mul_(self.momentum).add_(1, DerCpu)\n\n ParamsCpu.add_(-self.learning_rate, MomentumCpu)\n\n if test_with_ideal:\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n\n # Wait for all tasks to be finished\n for task_id in task_ids:\n while not self.get_task_status(task_id):\n pass\n\n def ideal_update_params_with_name(self, layer, der_name, param_name, shape):\n weight_decay = self.weight_decay\n momentum = self.momentum\n dampening = 0\n nesterov = False\n lr = self.learning_rate\n\n global_momentum_name = layer.name_modifier(param_name + 'Momentum')\n\n if layer.StoreInEnclave:\n layer.transfer_enclave_to_cpu(der_name)\n layer.transfer_enclave_to_cpu(param_name)\n d_p = torch.clone(layer.get_cpu(der_name)).detach()\n p = torch.clone(layer.get_cpu(param_name)).detach()\n\n if weight_decay != 0:\n d_p.add_(weight_decay, p)\n if global_momentum_name not in self.ideal_momentum_buf:\n buf 
= self.ideal_momentum_buf[global_momentum_name] = torch.clone(d_p).detach()\n else:\n buf = self.ideal_momentum_buf[global_momentum_name]\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n p.add_(-lr, d_p)\n\n return p, buf" }, { "identifier": "SGXLinearBase", "path": "python/layers/sgx_linear_base.py", "snippet": "class SGXLinearBase(SecretLayerBase):\n batch_size = None\n InputShape = None\n WeightShape = None\n OutputShape = None\n\n def __init__(\n self, sid, LayerName, EnclaveMode, batch_size, n_output_features, \n n_input_features=None, is_enclave_mode=False, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.ForwardFuncName = \"SGXLinear\"\n self.BackwardFuncName = \"DerSGXLinear\"\n self.PlainFunc = torch.nn.Linear\n self.is_enclave_mode = is_enclave_mode\n self.n_output_features = n_output_features\n self.n_input_features = n_input_features\n self.batch_size = batch_size\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.Linear\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.Linear\n # self.StoreInEnclave = False\n\n def init_shape(self):\n self.WeightShape = self.DerWeightShape = [self.n_output_features, self.n_input_features]\n self.BiasShape = self.DerBiasShape = [self.n_output_features]\n if self.n_input_features is None:\n self.InputShape = self.PrevLayer.get_output_shape()\n else:\n self.InputShape = self.DerInputShape = [self.batch_size, self.n_input_features]\n self.OutputShape = self.DerOutputShape = [self.batch_size, self.n_output_features]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n \n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.n_input_features, self.n_output_features)\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.sgx_linear_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n self.batch_size, self.n_input_features, self.n_output_features)\n else:\n self.ForwardFunc = self.ForwardFunc(self.n_input_features, self.n_output_features)\n self.PlainFunc = self.PlainFunc(self.n_input_features, self.n_output_features)\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n if self.EnclaveMode is ExecutionModeOptions.CPU:\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.ForwardFunc.cuda()\n # print(\"======== SGX linear init finish\")\n\n def link_tensors(self):\n super().link_tensors()\n\n def init_params(self):\n cpu_w = 
torch.zeros(self.w_shape)\n torch.nn.init.xavier_normal_(cpu_w, 1)\n self.set_tensor_cpu_enclave(\"weight\", cpu_w)\n cpu_b = torch.zeros(self.b_shape)\n torch.nn.init.constant_(cpu_b, 0)\n self.set_tensor_cpu_enclave(\"bias\", cpu_b)\n\n def get_output_shape(self):\n return self.OutputShape\n\n def inject_params(self, params):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n cpu_w = self.get_cpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n self.transfer_cpu_to_enclave(\"weight\")\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n self.transfer_cpu_to_enclave(\"bias\")\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n cpu_w = self.get_cpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n cpu_w = self.get_gpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n cpu_b = self.get_gpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n self.make_sure_cpu_is_latest(\"weight\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n NeededTensorNames = [(\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n (\"bias\", self.BiasShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.forward_tensor_transfer()\n self.sgx_linear_forward(self.LayerName)\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.requires_grad_on_cpu(\"input\")\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def plain_forward(self, NeedBackward=False):\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n # self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n # torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n # torch.set_num_threads(4)\n\n def show_plain_error_forward(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SGXConvBase", "path": "python/layers/sgx_conv_base.py", "snippet": "class 
SGXConvBase(SecretLayerBase):\n batch_size = None\n pytorch_x_shape, sgx_x_shape = None, None\n pytorch_w_shape, sgx_w_shape = None, None\n bias_shape = None\n pytorch_y_shape, sgx_y_shape = None, None\n\n def __init__(\n self, sid, LayerName, EnclaveMode,\n n_output_channel, filter_hw, stride, padding, batch_size=None, n_input_channel=None,\n img_hw=None, bias=True,\n is_enclave_mode=False, link_prev=True, link_next=True, manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.ForwardFuncName = \"SGXConv\"\n self.BackwardFuncName = \"DerSGXConv\"\n self.PlainFunc = torch.nn.Conv2d\n self.is_enclave_mode = is_enclave_mode\n self.batch_size = batch_size\n self.n_input_channel = n_input_channel\n self.n_output_channel = n_output_channel\n self.img_hw = img_hw\n self.filter_hw = filter_hw\n self.padding = padding\n self.stride = stride\n self.bias = bias\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.Conv2d\n\n # --------------\n # Add BIAS!!!!!\n # --------------\n\n def init_shape(self):\n if self.batch_size is None and self.PrevLayer is not None:\n self.pytorch_x_shape = self.PrevLayer.get_output_shape()\n self.batch_size, self.n_input_channel, self.img_hw, _ = self.pytorch_x_shape\n else:\n self.pytorch_x_shape = [self.batch_size, self.n_input_channel, self.img_hw, self.img_hw]\n # print(self.LayerName)\n # st()\n # BHWC\n self.sgx_x_shape = [self.pytorch_x_shape[0], self.pytorch_x_shape[2], self.pytorch_x_shape[3], self.pytorch_x_shape[1]]\n # pytorch weight is out * in * h * w\n self.pytorch_w_shape = [self.n_output_channel, self.n_input_channel, self.filter_hw, self.filter_hw]\n # w shape is in * w * h * out, the transpose of out * h * w * in\n self.sgx_w_shape = [self.n_output_channel, self.filter_hw, self.filter_hw, self.n_input_channel]\n # BCHW\n self.pytorch_y_shape = calc_conv2d_output_shape_stride(self.pytorch_x_shape, self.pytorch_w_shape, self.padding, self.stride)\n # BHWC\n self.sgx_y_shape = [self.pytorch_y_shape[0], self.pytorch_y_shape[2], self.pytorch_y_shape[3], self.pytorch_y_shape[1]]\n self.bias_shape = [self.n_output_channel]\n\n # print(\n # f\"Init_shape pytorch_input {self.pytorch_x_shape}, sgx_input {self.sgx_x_shape}, \"\n # f\"pytorch_output {self.pytorch_y_shape}, sgx_output {self.sgx_y_shape}, \"\n # f\"pytorch_weight {self.pytorch_w_shape}, sgx_weight {self.sgx_w_shape}, \"\n # f\"bias {self.bias_shape}\"\n # )\n\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.sgx_w_shape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.bias_shape),\n ]\n\n def init(self, start_enclave=True):\n # print(f\"Weight shape {self.sgx_w_shape}\")\n TensorLoader.init(self, start_enclave)\n \n \n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n weight_pytorch_form = self.PlainFunc.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_cpu(\"weight\").data.copy_(weight_tf_form)\n self.transfer_cpu_to_enclave(\"weight\")\n # Bias\n if self.bias:\n bias_data = self.PlainFunc.bias.data\n else:\n bias_data = torch.zeros(self.bias_shape)\n self.get_cpu(\"bias\").data.copy_(bias_data)\n self.transfer_cpu_to_enclave(\"bias\")\n 
self.sgx_conv_init(\n self.LayerName,\n \"sgx_input\", \"sgx_output\", \"weight\", \"bias\",\n # \"sgx_DerInput\", \"sgx_DerOutput\", \"DerWeight\", \"DerBias\",\n # \"input\", \"output\", \"weight\", \n # \"DerInput\", \"DerOutput\", \"DerWeight\", \n self.batch_size, self.img_hw, self.img_hw, self.n_input_channel, \n self.pytorch_y_shape[2], self.pytorch_y_shape[3], self.n_output_channel, \n self.filter_hw, self.padding, self.stride)\n elif self.EnclaveMode in[ ExecutionModeOptions.CPU, ExecutionModeOptions.GPU]:\n self.ForwardFunc = self.ForwardFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n self.PlainFunc = self.PlainFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n weight_pytorch_form = list(self.ForwardFunc.parameters())[0].data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n if self.EnclaveMode is ExecutionModeOptions.CPU:\n self.set_cpu(\"weight\", weight_tf_form)\n if self.bias:\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n bias_data = self.PlainFunc.bias.data\n self.set_cpu(\"bias\", bias_data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.set_gpu(\"weight\", weight_tf_form)\n if self.bias:\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n bias_data = self.PlainFunc.bias.data\n self.set_gpu(\"bias\", bias_data)\n self.ForwardFunc.cuda()\n\n\n def link_tensors(self):\n super().link_tensors()\n\n def init_params(self):\n cpu_w = torch.zeros(self.sgx_w_shape)\n torch.nn.init.xavier_normal_(cpu_w, 1)\n self.set_tensor_cpu_gpu_enclave(\"weight\", cpu_w)\n\n def get_output_shape(self):\n return self.pytorch_y_shape\n \n def weight_pytorch2tf(self, weight_pytorch_form):\n # weight_pytorch_form is out * in * h * w\n # out * (h * w) * in, \n # h and w dont transpose\n # weight_tf_form = weight_pytorch_form.permute(1,3,2,0).contiguous()\n weight_tf_form = weight_pytorch_form.permute(0,2,3,1).contiguous()\n return weight_tf_form\n\n def weight_tf2pytorch(self, weight_tf_form):\n # weight_tf_form is out * (h * w) * in, the transpose of out * (h * w) * in\n # out * in * h * w\n # h and w dont transpose\n # weight_pytorch_form = weight_tf_form.permute(3, 0, 2, 1).contiguous()\n weight_pytorch_form = weight_tf_form.permute(0,3,1,2).contiguous()\n return weight_pytorch_form\n\n def feature_pytorch2tf(self, tensor_pytorch_form):\n # tensor_pytorch_form is b * in * h * w\n # b * h * w * in\n tensor_tf_form = tensor_pytorch_form.permute(0, 2, 3, 1).contiguous()\n return tensor_tf_form\n \n def feature_tf2pytorch(self, tensor_tf_form):\n # tensor_tf_form is b * h * w * in\n # b * in * h * w\n tensor_pytorch_form = tensor_tf_form.permute(0, 3, 1, 2).contiguous()\n return tensor_pytorch_form\n\n def inject_params(self, params):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n cpu_w = self.get_cpu(\"weight\")\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n cpu_w.copy_(weight_tf_form)\n self.transfer_cpu_to_enclave(\"weight\")\n\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n bias_data = params.bias.data\n else:\n bias_data = torch.zeros(self.n_output_channel)\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(bias_data)\n self.transfer_cpu_to_enclave(\"bias\")\n elif self.EnclaveMode is 
ExecutionModeOptions.CPU:\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_cpu(\"weight\").copy_(weight_tf_form)\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n self.get_cpu(\"bias\").copy_(params.bias.data)\n\n # Move weight to ForwardFunc\n weight_tf_form = self.get_cpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_gpu(\"weight\").copy_(weight_tf_form)\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n self.get_gpu(\"bias\").copy_(params.bias.data)\n\n # Move weight to ForwardFunc\n weight_tf_form = self.get_gpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n self.make_sure_cpu_is_latest(\"weight\")\n weight_tf_form = self.get_cpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n plain_layer.weight.data.copy_(weight_pytorch_form)\n\n assert (\n (self.bias and plain_layer.bias is not None) or\n (not self.bias and plain_layer.bias is None)\n )\n if self.bias:\n self.make_sure_cpu_is_latest(\"bias\")\n bias_data = self.get_cpu(\"bias\")\n plain_layer.bias.data.copy_(bias_data)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n NeededTensorNames = [(\"output\", self.pytorch_y_shape, None), (\"sgx_output\", self.sgx_y_shape, None),\n (\"DerInput\", self.pytorch_x_shape, None), (\"sgx_DerInput\", self.sgx_x_shape, None),\n (\"input\", self.pytorch_x_shape, None), (\"sgx_input\", self.sgx_x_shape, None),\n (\"DerOutput\", self.pytorch_y_shape, None), (\"sgx_DerOutput\", self.sgx_y_shape, None),\n (\"weight\", self.sgx_w_shape, None),\n (\"bias\", self.bias_shape, None),\n ]\n self.tensor_name_list = NeededTensorNames\n\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer(\"input\")\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n \n # \"input\" is pytorch form\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n if self.PrevLayer.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_enclave_to_cpu(\"input\")\n input_pytorch_form = self.get_cpu(\"input\")\n \n if torch.sum(self.get_cpu(\"input\").abs()) == 0:\n print(self.LayerName)\n raise RuntimeError(\"SGX input not load\")\n input_tf_form = self.feature_pytorch2tf(input_pytorch_form)\n self.set_cpu(\"sgx_input\", input_tf_form)\n self.transfer_cpu_to_enclave(\"sgx_input\")\n # self.forward_tensor_transfer(\"sgx_input\")\n # print(self.get_cpu(\"sgx_input\").squeeze())\n \n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} sgx_conv_forward\", verbose_level=VerboseLevel.LAYER):\n # if self.LayerName == \"Layer2.0.downsample.conv\":\n # st()\n self.sgx_conv_forward(self.LayerName)\n \n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Output Postprocess\", verbose_level=VerboseLevel.LAYER):\n 
self.make_sure_cpu_is_latest(\"sgx_output\")\n output_tf_form = self.get_cpu(\"sgx_output\")\n output_pytorch_form = self.feature_tf2pytorch(output_tf_form)\n self.set_cpu(\"output\", output_pytorch_form)\n self.transfer_cpu_to_enclave(\"output\")\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Weight Transfer\", verbose_level=VerboseLevel.LAYER):\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} get weight_tf_form\", verbose_level=VerboseLevel.LAYER):\n weight_tf_form = self.get_cpu(\"weight\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} weight_tf2pytorch\", verbose_level=VerboseLevel.LAYER):\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} copy data\", verbose_level=VerboseLevel.LAYER):\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} GPU conv forward\", verbose_level=VerboseLevel.LAYER):\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Weight Transfer\", verbose_level=VerboseLevel.LAYER):\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} get weight_tf_form\", verbose_level=VerboseLevel.LAYER):\n weight_tf_form = self.get_gpu(\"weight\")\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} weight_tf2pytorch\", verbose_level=VerboseLevel.LAYER):\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} copy data\", verbose_level=VerboseLevel.LAYER):\n self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} GPU conv forward\", verbose_level=VerboseLevel.LAYER):\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n\n def plain_forward(self, NeedBackward=False):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"weight\")\n if self.bias:\n self.make_sure_cpu_is_latest(\"bias\")\n # self.requires_grad_on_cpu(\"input\")\n weight_tf_form = self.get_cpu(\"weight\")\n weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n self.PlainFunc.weight.data.copy_(weight_pytorch_form)\n if self.bias:\n bias_data = self.get_cpu(\"bias\")\n self.PlainFunc.bias.data.copy_(bias_data)\n elif self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.GPU]:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n # torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n # torch.set_num_threads(4)\n\n def show_plain_error_forward(self):\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n def print_connection_info(self):\n 
print(f\"{self.LayerName:20} shape{self.pytorch_x_shape}{' ':20} mode{self.EnclaveMode}{' ':20} input {self.PrevLayer.LayerName:20} output {self.NextLayer.LayerName:20}\")" }, { "identifier": "ExecutionModeOptions", "path": "python/utils/basic_utils.py", "snippet": "class ExecutionModeOptions(Enum):\n Enclave = 1\n CPU = 2\n GPU = 3" }, { "identifier": "Logger", "path": "python/utils/logger_utils.py", "snippet": "class Logger(object):\n logfile_path = \"logfile.log\"\n\n def __init__(self):\n self.terminal = sys.stdout\n self.log = open(self.logfile_path, \"a\")\n\n def reset_logfile(self, path):\n self.logfile_path = path\n self.log = open(self.logfile_path, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n # pass\n self.terminal.flush()\n self.log.flush()" }, { "identifier": "NamedTimerInstance", "path": "python/utils/timer_utils.py", "snippet": "class NamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n ...\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)\n ..." }, { "identifier": "VerboseLevel", "path": "python/utils/timer_utils.py", "snippet": "class VerboseLevel(IntEnum):\n EVERY = 1\n LAYER = 2\n RUN = 3\n EPOCH = 4" }, { "identifier": "NamedTimer", "path": "python/utils/timer_utils.py", "snippet": "class NamedTimer(object):\n __instance = None\n\n @staticmethod\n def get_instance():\n if NamedTimer.__instance is None:\n NamedTimer()\n return NamedTimer.__instance\n\n def __init__(self):\n NamedTimer.__instance = self\n self.timers = {}\n self.verbose_level = VerboseLevel.EVERY\n\n @staticmethod\n def start_timer(name, **kwargs):\n NamedTimer.get_instance().timers[name] = Timer(name, **kwargs)\n return NamedTimer.get_instance().timers[name]\n\n @staticmethod\n def start(name, **kwargs):\n return NamedTimer.get_instance().start_timer(name, **kwargs)\n\n @staticmethod\n def end_timer(name, **kwargs):\n NamedTimer.get_instance().timers[name].end(**kwargs)\n\n @staticmethod\n def end(name, tmp_name=None):\n # print(NamedTimer.get_instance().timers[name].verbose_level, NamedTimer.get_instance().verbose_level)\n NamedTimer.get_instance().end_timer(name, tmp_name=tmp_name)\n\n @staticmethod\n def set_verbose_level(verbose_level):\n if not isinstance(verbose_level, VerboseLevel):\n raise ValueError(\"Please set an enum from VerboseLevel\")\n NamedTimer.get_instance().verbose_level = verbose_level" }, { "identifier": "compare_expected_actual", "path": "python/utils/torch_utils.py", "snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = 
torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res" } ]
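The context entries above all come from one SGX secret-execution DNN codebase. As a reading aid, here is a minimal wiring sketch based only on the constructor signatures quoted in the context; the layer names and shapes are invented, and actually running it requires this repo's enclave/TensorLoader runtime, so treat it as illustrative usage rather than the project's own test code.

```python
# Minimal sketch: wiring the quoted secret layers into a SecretNeuralNetwork.
# set_layers() requires a SecretInputLayer first and a SecretOutputLayer last;
# it then links prev/next layers and calls each layer's init_shape()/init().
# Shapes and names here are hypothetical.
import torch

from python.layers.input import SecretInputLayer
from python.layers.maxpool2d import SecretMaxpool2dLayer
from python.layers.relu import SecretReLULayer
from python.layers.output import SecretOutputLayer
from python.sgx_net import SecretNeuralNetwork
from python.utils.basic_utils import ExecutionModeOptions

sid = 0
mode = ExecutionModeOptions.GPU  # Enclave / CPU / GPU, per the snippets

layers = [
    SecretInputLayer(sid, "input", [4, 3, 32, 32], mode),  # B, C, H, W
    SecretMaxpool2dLayer(sid, "pool1", mode, filter_hw=2, stride=2, padding=0),
    SecretReLULayer(sid, "relu1", mode),
    SecretOutputLayer(sid, "output", mode, inference=True),  # CrossEntropyLoss head
]

net = SecretNeuralNetwork(sid, "toy_net")
net.set_layers(layers)
layers[0].set_input(torch.zeros(4, 3, 32, 32))  # feed data before forward()
net.forward()  # runs each layer's forward() under per-layer NamedTimerInstance
```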
import os
import sys
import numpy as np
import torch
import torch.distributed as dist
import pdb
from pdb import set_trace as st
from torch import optim, nn
from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \
    get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad
from python.enclave_interfaces import GlobalTensor
from python.layers.batch_norm_2d import SecretBatchNorm2dLayer
from python.layers.flatten import SecretFlattenLayer
from python.layers.input import SecretInputLayer
from python.layers.maxpool2d import SecretMaxpool2dLayer
from python.layers.output import SecretOutputLayer
from python.layers.relu import SecretReLULayer
from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer
from python.layers.sgx_linear_base import SGXLinearBase
from python.layers.sgx_conv_base import SGXConvBase
from python.utils.basic_utils import ExecutionModeOptions
from python.utils.logger_utils import Logger
from python.quantize_net import NetQ
from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch
from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer
from python.utils.torch_utils import compare_expected_actual
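A usage note on the distributed pieces in this import list: `init_communicate`, quoted in the context above, only sets `MASTER_ADDR`/`MASTER_PORT` and calls `torch.distributed.init_process_group` with the world size taken from `SecretConfig.worldSize`, so every participating process must share that config. The sketch below is hypothetical; rank, address, and port are placeholder values.

```python
# Hypothetical single-process bring-up; rank/address/port are placeholders.
from python.sgx_net import init_communicate, warming_up_cuda

init_communicate(rank=0, master_address="127.0.0.1", master_port="29500")
warming_up_cuda()  # pre-runs conv2d so later timers measure steady-state speed
```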
21,043
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name)
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name)
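The code above leans on `compare_expected_actual` with `get_relative=True`; per its definition in the context, that switches the return value from a plain mean-absolute-difference float to an `Error` namedtuple. A toy illustration with made-up tensor values:

```python
# Toy check of compare_expected_actual's two return modes.
import torch
from python.utils.torch_utils import compare_expected_actual

expected = torch.ones(4)
actual = torch.full((4,), 1.01)

print(compare_expected_actual(expected, actual))  # ~0.01 (mean |diff|)
err = compare_expected_actual(expected, actual, get_relative=True)
print(err.AvgAbsDiff, err.RelAvgDiff, err.AvgRelDiff, err.StdRelDiff)
```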
compare_name_function = [("input", get_layer_input), ("output", get_layer_output),
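The truncated line above is the dataset's ground-truth `next_line` field and is left as-is. Separately, much of the quoted `SGXConvBase` code converts between PyTorch's BCHW layout and the enclave's BHWC layout; the round-trip below exercises exactly the permutes those helpers use (tensor sizes are arbitrary):

```python
# Round-trip of the layout conversions defined in SGXConvBase.
import torch

x = torch.randn(2, 3, 5, 5)                 # features: B, C, H, W
x_tf = x.permute(0, 2, 3, 1).contiguous()   # feature_pytorch2tf -> B, H, W, C
assert torch.equal(x_tf.permute(0, 3, 1, 2).contiguous(), x)  # feature_tf2pytorch

w = torch.randn(4, 3, 3, 3)                 # weights: out, in, h, w
w_tf = w.permute(0, 2, 3, 1).contiguous()   # weight_pytorch2tf -> out, h, w, in
assert torch.equal(w_tf.permute(0, 3, 1, 2).contiguous(), w)  # weight_tf2pytorch
```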
3
2023-11-01 10:37:37+00:00
24k
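Closing out this record: the `SgdOptimizer` snippet in the context implements SGD with momentum and weight decay (defaults `lr=0.05`, `momentum=0.9`, `weight_decay=5e-4`) and validates its enclave/CPU paths against an "ideal" reference update. That reference, restated in plain PyTorch with toy tensors (`dampening=0`, `nesterov=False`, as in the snippet):

```python
# Reference update from ideal_update_params_with_name, restated standalone.
import torch

lr, momentum, weight_decay = 0.05, 0.9, 5e-4  # SgdOptimizer defaults
p = torch.randn(8, 8)     # parameter
grad = torch.randn(8, 8)  # its gradient
buf = None                # momentum buffer, created on the first step

for _ in range(3):
    d_p = grad + weight_decay * p        # d_p.add_(weight_decay, p)
    if buf is None:
        buf = d_p.clone()                # first step: buffer starts as d_p
    else:
        buf.mul_(momentum).add_(d_p)     # buf = momentum*buf + (1-dampening)*d_p
    p = p - lr * buf                     # p.add_(-lr, d_p) with d_p = buf
```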
Codra-Ingenierie-Informatique/DataLab
cdl/core/gui/panel/signal.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n \"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n 
\"shape/result/s/line/width\": 1,\n \"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):" }, { "identifier": "roieditor", "path": "cdl/core/gui/roieditor.py", "snippet": "class BaseROIEditorMeta(type(QW.QWidget), abc.ABCMeta):\nclass BaseROIEditor(QW.QWidget, metaclass=BaseROIEditorMeta):\nclass ROIRangeInfo(ObjectInfo):\nclass SignalROIEditor(BaseROIEditor):\nclass ImageROIEditor(BaseROIEditor):\n ICON_NAME = None\n OBJ_NAME = None\n ICON_NAME = \"signal_roi_new.svg\"\n OBJ_NAME = _(\"signal\")\n ICON_NAME = \"image_roi_new.svg\"\n OBJ_NAME = _(\"image\")\n def __init__(\n self,\n parent: QW.QDialog,\n obj: BaseObj,\n extract: bool,\n singleobj: bool | None = None,\n ):\n def modified(self) -> bool:\n def modified(self, value: bool):\n def dialog_accepted(self):\n def get_data(self) -> ROIDataParam:\n 
def setup_widget(self):\n def add_roi_item(self, roi_item):\n def update_roi_titles(self):\n def item_removed(self, item):\n def item_moved(self):\n def get_roi_item_coords(roi_item):\n def __init__(self, roi_items):\n def get_text(self):\n def setup_widget(self):\n def add_roi(self):\n def update_roi_titles(self):\n def get_roi_item_coords(roi_item):\n def setup_widget(self):\n def add_roi(self, geometry: RoiDataGeometries):\n def update_roi_titles(self):\n def get_roi_item_coords(roi_item):" }, { "identifier": "SignalActionHandler", "path": "cdl/core/gui/actionhandler.py", "snippet": "class SignalActionHandler(BaseActionHandler):\n \"\"\"Object handling signal panel GUI interactions: actions, menus, ...\"\"\"\n\n OBJECT_STR = _(\"signal\")\n\n def create_first_actions(self):\n \"\"\"Create actions that are added to the menus in the first place\"\"\"\n with self.new_category(ActionCategory.PROCESSING):\n self.new_action(\n _(\"Normalize\"), triggered=self.panel.processor.compute_normalize\n )\n self.new_action(\n _(\"Derivative\"), triggered=self.panel.processor.compute_derivative\n )\n self.new_action(\n _(\"Integral\"), triggered=self.panel.processor.compute_integral\n )\n\n super().create_first_actions()\n\n with self.new_category(ActionCategory.OPERATION):\n self.new_action(\n _(\"Peak detection\"),\n separator=True,\n triggered=self.panel.processor.compute_peak_detection,\n icon=get_icon(\"peak_detect.svg\"),\n )\n\n with self.new_category(ActionCategory.PROCESSING):\n self.new_action(\n _(\"Interpolation\"),\n triggered=self.panel.processor.compute_interpolation,\n )\n self.new_action(\n _(\"Resampling\"), triggered=self.panel.processor.compute_resampling\n )\n self.new_action(\n _(\"Detrending\"), triggered=self.panel.processor.compute_detrending\n )\n\n def cra_fit(title, fitdlgfunc):\n \"\"\"Create curve fitting action\"\"\"\n return self.new_action(\n title,\n triggered=lambda: self.panel.processor.compute_fit(title, fitdlgfunc),\n )\n\n with self.new_category(ActionCategory.PROCESSING):\n with self.new_menu(_(\"Fitting\")):\n cra_fit(_(\"Gaussian fit\"), fitdialog.gaussianfit)\n cra_fit(_(\"Lorentzian fit\"), fitdialog.lorentzianfit)\n cra_fit(_(\"Voigt fit\"), fitdialog.voigtfit)\n self.new_action(\n _(\"Polynomial fit\"),\n triggered=self.panel.processor.compute_polyfit,\n )\n self.new_action(\n _(\"Multi-Gaussian fit\"),\n triggered=self.panel.processor.compute_multigaussianfit,\n )\n\n with self.new_category(ActionCategory.COMPUTING):\n self.new_action(\n _(\"Full width at half-maximum\"),\n triggered=self.panel.processor.compute_fwhm,\n tip=_(\"Compute Full Width at Half-Maximum (FWHM)\"),\n )\n self.new_action(\n _(\"Full width at\") + \" 1/e²\",\n triggered=self.panel.processor.compute_fw1e2,\n tip=_(\"Compute Full Width at Maximum\") + \"/e²\",\n )\n\n with self.new_category(ActionCategory.VIEW):\n antialiasing_action = self.new_action(\n _(\"Curve anti-aliasing\"),\n icon=get_icon(\"curve_antialiasing.svg\"),\n toggled=self.panel.toggle_anti_aliasing,\n tip=_(\"Toggle curve anti-aliasing on/off (may slow down plotting)\"),\n toolbar_pos=-1,\n )\n antialiasing_action.setChecked(Conf.view.sig_antialiasing.get(True))\n\n def create_last_actions(self):\n \"\"\"Create actions that are added to the menus in the end\"\"\"\n with self.new_category(ActionCategory.OPERATION):\n self.new_action(\n _(\"Convolution\"),\n triggered=self.panel.processor.compute_convolution,\n separator=True,\n )\n super().create_last_actions()" }, { "identifier": "BaseDataPanel", "path": 
"cdl/core/gui/panel/base.py", "snippet": "class BaseDataPanel(AbstractPanel):\n \"\"\"Object handling the item list, the selected item properties and plot\"\"\"\n\n PANEL_STR = \"\" # e.g. \"Signal Panel\"\n PARAMCLASS: SignalObj | ImageObj = None # Replaced in child object\n ANNOTATION_TOOLS = ()\n DIALOGSIZE = (800, 600)\n # Replaced by the right class in child object:\n IO_REGISTRY: SignalIORegistry | ImageIORegistry | None = None\n SIG_STATUS_MESSAGE = QC.Signal(str) # emitted by \"qt_try_except\" decorator\n SIG_REFRESH_PLOT = QC.Signal(str, bool) # Connected to PlotHandler.refresh_plot\n ROIDIALOGOPTIONS = {}\n # Replaced in child object:\n ROIDIALOGCLASS: roieditor.SignalROIEditor | roieditor.ImageROIEditor | None = None\n\n @abc.abstractmethod\n def __init__(self, parent: QW.QWidget, plotwidget: PlotWidget, toolbar) -> None:\n super().__init__(parent)\n self.mainwindow: CDLMainWindow = parent\n self.objprop = ObjectProp(self, self.PARAMCLASS)\n self.objmodel = objectmodel.ObjectModel()\n self.objview = objectview.ObjectView(self, self.objmodel)\n self.objview.SIG_IMPORT_FILES.connect(self.handle_dropped_files)\n self.objview.populate_tree()\n self.plothandler: SignalPlotHandler | ImagePlotHandler = None\n self.processor: SignalProcessor | ImageProcessor = None\n self.acthandler: actionhandler.BaseActionHandler = None\n self.__metadata_clipboard = {}\n self.context_menu = QW.QMenu()\n self.__separate_views: dict[QW.QDialog, SignalObj | ImageObj] = {}\n\n def closeEvent(self, event):\n \"\"\"Reimplement QMainWindow method\"\"\"\n self.processor.close()\n super().closeEvent(event)\n\n # ------AbstractPanel interface-----------------------------------------------------\n def serialize_object_to_hdf5(\n self, obj: SignalObj | ImageObj, writer: NativeH5Writer\n ) -> None:\n \"\"\"Serialize object to HDF5 file\"\"\"\n # Before serializing, update metadata from plot item parameters, in order to\n # save the latest visualization settings:\n try:\n item = self.plothandler[obj.uuid]\n obj.update_metadata_from_plot_item(item)\n except KeyError:\n # Plot item has not been created yet (this happens when auto-refresh has\n # been disabled)\n pass\n super().serialize_object_to_hdf5(obj, writer)\n\n def serialize_to_hdf5(self, writer: NativeH5Writer) -> None:\n \"\"\"Serialize whole panel to a HDF5 file\"\"\"\n with writer.group(self.H5_PREFIX):\n for group in self.objmodel.get_groups():\n with writer.group(self.get_serializable_name(group)):\n with writer.group(\"title\"):\n writer.write_str(group.title)\n for obj in group.get_objects():\n self.serialize_object_to_hdf5(obj, writer)\n\n def deserialize_from_hdf5(self, reader: NativeH5Reader) -> None:\n \"\"\"Deserialize whole panel from a HDF5 file\"\"\"\n with reader.group(self.H5_PREFIX):\n for name in reader.h5.get(self.H5_PREFIX, []):\n with reader.group(name):\n group = self.add_group(\"\")\n with reader.group(\"title\"):\n group.title = reader.read_str()\n for obj_name in reader.h5.get(f\"{self.H5_PREFIX}/{name}\", []):\n obj = self.deserialize_object_from_hdf5(reader, obj_name)\n self.add_object(obj, group.uuid, set_current=False)\n self.selection_changed()\n\n def __len__(self) -> int:\n \"\"\"Return number of objects\"\"\"\n return len(self.objmodel)\n\n def __getitem__(self, nb: int) -> SignalObj | ImageObj:\n \"\"\"Return object from its number (1 to N)\"\"\"\n return self.objmodel.get_object_from_number(nb)\n\n def __iter__(self):\n \"\"\"Iterate over objects\"\"\"\n return iter(self.objmodel)\n\n def create_object(self) -> SignalObj | 
ImageObj:\n        \"\"\"Create object (signal or image)\n\n        Returns:\n            SignalObj or ImageObj object\n        \"\"\"\n        return self.PARAMCLASS()  # pylint: disable=not-callable\n\n    @qt_try_except()\n    def add_object(\n        self,\n        obj: SignalObj | ImageObj,\n        group_id: str | None = None,\n        set_current: bool = True,\n    ) -> None:\n        \"\"\"Add object\n\n        Args:\n            obj: SignalObj or ImageObj object\n            group_id: group id\n            set_current: if True, set the added object as current\n        \"\"\"\n        if obj in self.objmodel:\n            # Prevent adding the same object twice\n            raise ValueError(\n                f\"Object {hex(id(obj))} already in panel. \"\n                f\"The same object cannot be added twice: \"\n                f\"please use a copy of the object.\"\n            )\n        if group_id is None:\n            group_id = self.objview.get_current_group_id()\n            if group_id is None:\n                groups = self.objmodel.get_groups()\n                if groups:\n                    group_id = groups[0].uuid\n                else:\n                    group_id = self.add_group(\"\").uuid\n        obj.check_data()\n        self.objmodel.add_object(obj, group_id)\n        self.objview.add_object_item(obj, group_id, set_current=set_current)\n        self.SIG_OBJECT_ADDED.emit()\n        self.objview.update_tree()\n\n    def remove_all_objects(self) -> None:\n        \"\"\"Remove all objects\"\"\"\n        # iterate over a copy of self.__separate_views dict keys to avoid RuntimeError:\n        # dictionary changed size during iteration\n        for dlg in list(self.__separate_views):\n            dlg.done(QW.QDialog.DialogCode.Rejected)\n        self.objmodel.clear()\n        self.plothandler.clear()\n        self.objview.populate_tree()\n        self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n        super().remove_all_objects()\n\n    # ---- Signal/Image Panel API ------------------------------------------------------\n    def setup_panel(self) -> None:\n        \"\"\"Setup panel\"\"\"\n        self.acthandler.create_all_actions()\n        self.processor.SIG_ADD_SHAPE.connect(self.plothandler.add_shapes)\n        self.SIG_REFRESH_PLOT.connect(self.plothandler.refresh_plot)\n        self.objview.SIG_SELECTION_CHANGED.connect(self.selection_changed)\n        self.objview.SIG_ITEM_DOUBLECLICKED.connect(\n            lambda oid: self.open_separate_view([oid])\n        )\n        self.objview.SIG_CONTEXT_MENU.connect(self.__popup_contextmenu)\n        self.objprop.properties.SIG_APPLY_BUTTON_CLICKED.connect(\n            self.properties_changed\n        )\n        self.addWidget(self.objview)\n        self.addWidget(self.objprop)\n        self.add_results_button()\n\n    def get_category_actions(\n        self, category: actionhandler.ActionCategory\n    ) -> list[QW.QAction]:  # pragma: no cover\n        \"\"\"Return actions for category\"\"\"\n        return self.acthandler.feature_actions.get(category, [])\n\n    def __popup_contextmenu(self, position: QC.QPoint) -> None:  # pragma: no cover\n        \"\"\"Popup context menu at position\"\"\"\n        # Note: For now, it is completely unnecessary to clear the context menu every time,\n        # but implementing it this way could be useful in the future if menu contents\n        # should take into account the current object selection\n        self.context_menu.clear()\n        actions = self.get_category_actions(actionhandler.ActionCategory.CONTEXT_MENU)\n        add_actions(self.context_menu, actions)\n        self.context_menu.popup(position)\n\n    # ------Creating, adding, removing objects------------------------------------------\n    def add_group(self, title: str) -> objectmodel.ObjectGroup:\n        \"\"\"Add group\"\"\"\n        group = self.objmodel.add_group(title)\n        self.objview.add_group_item(group)\n        return group\n\n    # TODO: [P2] New feature: move objects up/down\n    # TODO: [P2] New feature: move objects to another group\n    def __duplicate_individual_obj(\n        self, oid: str, new_group_id: str | None = None, set_current: bool = True\n    ) -> None:\n        \"\"\"Duplicate 
individual object\"\"\"\n        obj = self.objmodel[oid]\n        if new_group_id is None:\n            new_group_id = self.objmodel.get_object_group_id(obj)\n        self.add_object(obj.copy(), group_id=new_group_id, set_current=set_current)\n\n    def duplicate_object(self) -> None:\n        \"\"\"Duplicate signal/image object\"\"\"\n        if not self.mainwindow.confirm_memory_state():\n            return\n        # Duplicate individual objects (exclusive with respect to groups)\n        for oid in self.objview.get_sel_object_uuids():\n            self.__duplicate_individual_obj(oid, set_current=False)\n        # Duplicate groups (exclusive with respect to individual objects)\n        for group in self.objview.get_sel_groups():\n            new_group = self.add_group(group.title)\n            for oid in self.objmodel.get_group_object_ids(group.uuid):\n                self.__duplicate_individual_obj(oid, new_group.uuid, set_current=False)\n        self.selection_changed(update_items=True)\n\n    def copy_metadata(self) -> None:\n        \"\"\"Copy object metadata\"\"\"\n        obj = self.objview.get_sel_objects()[0]\n        self.__metadata_clipboard = obj.metadata.copy()\n        new_pref = obj.short_id + \"_\"\n        for key, value in obj.metadata.items():\n            if ResultShape.match(key, value):\n                mshape = ResultShape.from_metadata_entry(key, value)\n                if not re.match(obj.PREFIX + r\"[0-9]{3}[\\s]*\", mshape.label):\n                    # Handling additional result (e.g. diameter)\n                    for a_key, a_value in obj.metadata.items():\n                        if isinstance(a_key, str) and a_key.startswith(mshape.label):\n                            self.__metadata_clipboard.pop(a_key)\n                            self.__metadata_clipboard[new_pref + a_key] = a_value\n                    mshape.label = new_pref + mshape.label\n                    # Handling result shape\n                    self.__metadata_clipboard.pop(key)\n                    self.__metadata_clipboard[mshape.key] = value\n\n    def paste_metadata(self) -> None:\n        \"\"\"Paste metadata to selected object(s)\"\"\"\n        sel_objects = self.objview.get_sel_objects(include_groups=True)\n        for obj in sorted(sel_objects, key=lambda obj: obj.short_id, reverse=True):\n            obj.metadata.update(self.__metadata_clipboard)\n        self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n    def remove_object(self) -> None:\n        \"\"\"Remove signal/image object\"\"\"\n        sel_groups = self.objview.get_sel_groups()\n        if sel_groups:\n            answer = QW.QMessageBox.warning(\n                self,\n                _(\"Delete group(s)\"),\n                _(\"Are you sure you want to delete the selected group(s)?\"),\n                QW.QMessageBox.Yes | QW.QMessageBox.No,\n            )\n            if answer == QW.QMessageBox.No:\n                return\n        sel_objects = self.objview.get_sel_objects(include_groups=True)\n        for obj in sorted(sel_objects, key=lambda obj: obj.short_id, reverse=True):\n            for dlg, obj_i in self.__separate_views.items():\n                if obj_i is obj:\n                    dlg.done(QW.QDialog.DialogCode.Rejected)\n            self.plothandler.remove_item(obj.uuid)\n            self.objview.remove_item(obj.uuid, refresh=False)\n            self.objmodel.remove_object(obj)\n        for group in sel_groups:\n            self.objview.remove_item(group.uuid, refresh=False)\n            self.objmodel.remove_group(group)\n        self.objview.update_tree()\n        self.selection_changed(update_items=True)\n        self.SIG_OBJECT_REMOVED.emit()\n\n    def delete_all_objects(self) -> None:  # pragma: no cover\n        \"\"\"Confirm before removing all objects\"\"\"\n        if len(self) == 0:\n            return\n        answer = QW.QMessageBox.warning(\n            self,\n            _(\"Delete all\"),\n            _(\"Do you want to delete all objects (%s)?\") % self.PANEL_STR,\n            QW.QMessageBox.Yes | QW.QMessageBox.No,\n        )\n        if answer == QW.QMessageBox.Yes:\n            self.remove_all_objects()\n\n    def delete_metadata(self, refresh_plot: bool = True) -> None:\n        \"\"\"Delete metadata of selected objects\n\n        Args:\n            refresh_plot (bool | None): Refresh plot. 
Defaults to True.\n \"\"\"\n for index, obj in enumerate(self.objview.get_sel_objects(include_groups=True)):\n obj.reset_metadata_to_defaults()\n if index == 0:\n self.selection_changed()\n if refresh_plot:\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def add_annotations_from_items(\n self, items: list, refresh_plot: bool = True\n ) -> None:\n \"\"\"Add object annotations (annotation plot items).\n\n Args:\n items (list): annotation plot items\n refresh_plot (bool | None): refresh plot. Defaults to True.\n \"\"\"\n for obj in self.objview.get_sel_objects(include_groups=True):\n obj.add_annotations_from_items(items)\n if refresh_plot:\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def update_metadata_view_settings(self) -> None:\n \"\"\"Update metadata view settings\"\"\"\n for obj in self.objmodel:\n obj.update_metadata_view_settings()\n self.SIG_REFRESH_PLOT.emit(\"all\", True)\n\n def copy_titles_to_clipboard(self) -> None:\n \"\"\"Copy object titles to clipboard (for reproducibility)\"\"\"\n QW.QApplication.clipboard().setText(str(self.objview))\n\n def new_group(self) -> None:\n \"\"\"Create a new group\"\"\"\n # Open a message box to enter the group name\n group_name, ok = QW.QInputDialog.getText(self, _(\"New group\"), _(\"Group name:\"))\n if ok:\n self.add_group(group_name)\n\n def rename_group(self) -> None:\n \"\"\"Rename a group\"\"\"\n # Open a message box to enter the group name\n group = self.objview.get_sel_groups()[0]\n group_name, ok = QW.QInputDialog.getText(\n self, _(\"Rename group\"), _(\"Group name:\"), QW.QLineEdit.Normal, group.title\n )\n if ok:\n group.title = group_name\n self.objview.update_item(group.uuid)\n\n @abc.abstractmethod\n def get_newparam_from_current(\n self, newparam: NewSignalParam | NewImageParam | None = None\n ) -> NewSignalParam | NewImageParam | None:\n \"\"\"Get new object parameters from the current object.\n\n Args:\n newparam (guidata.dataset.DataSet): new object parameters.\n If None, create a new one.\n\n Returns:\n New object parameters\n \"\"\"\n\n @abc.abstractmethod\n def new_object(\n self,\n newparam: NewSignalParam | NewImageParam | None = None,\n addparam: gds.DataSet | None = None,\n edit: bool = True,\n add_to_panel: bool = True,\n ) -> SignalObj | ImageObj | None:\n \"\"\"Create a new object (signal/image).\n\n Args:\n newparam (guidata.dataset.DataSet): new object parameters\n addparam (guidata.dataset.DataSet): additional parameters\n edit (bool): Open a dialog box to edit parameters (default: True)\n add_to_panel (bool): Add object to panel (default: True)\n\n Returns:\n New object\n \"\"\"\n\n def set_current_object_title(self, title: str) -> None:\n \"\"\"Set current object title\"\"\"\n obj = self.objview.get_current_object()\n obj.title = title\n self.objview.update_item(obj.uuid)\n\n def open_object(\n self, filename: str\n ) -> SignalObj | ImageObj | list[SignalObj | ImageObj]:\n \"\"\"Open object from file (signal/image), add it to DataLab and return it.\n\n Args:\n filename (str): file name\n\n Returns:\n New object or list of new objects\n \"\"\"\n obj_or_objlist = self.IO_REGISTRY.read(filename)\n objs = obj_or_objlist if isinstance(obj_or_objlist, list) else [obj_or_objlist]\n for obj in objs:\n self.add_object(obj, set_current=obj is objs[-1])\n self.selection_changed()\n if len(objs) == 1:\n return objs[0]\n return objs\n\n def save_object(self, obj, filename: str | None = None) -> None:\n \"\"\"Save object to file (signal/image)\"\"\"\n if filename is None:\n basedir = Conf.main.base_dir.get()\n 
filters = self.IO_REGISTRY.get_filters(IOAction.SAVE)\n with save_restore_stds():\n filename, _filt = getsavefilename(self, _(\"Save as\"), basedir, filters)\n if filename:\n with qt_try_loadsave_file(self.parent(), filename, \"save\"):\n Conf.main.base_dir.set(filename)\n self.IO_REGISTRY.write(filename, obj)\n\n def handle_dropped_files(self, filenames: list[str] | None = None) -> None:\n \"\"\"Handle dropped files\n\n Args:\n filenames (list(str)): File names\n\n Returns:\n None\n \"\"\"\n h5_fnames = [fname for fname in filenames if fname.endswith(\".h5\")]\n other_fnames = list(set(filenames) - set(h5_fnames))\n if h5_fnames:\n self.mainwindow.open_h5_files(h5_fnames, import_all=True)\n if other_fnames:\n self.open_objects(other_fnames)\n\n def open_objects(\n self, filenames: list[str] | None = None\n ) -> list[SignalObj | ImageObj]:\n \"\"\"Open objects from file (signals/images), add them to DataLab and return them.\n\n Args:\n filenames (list(str)): File names\n\n Returns:\n list of new objects\n \"\"\"\n if not self.mainwindow.confirm_memory_state():\n return []\n if filenames is None: # pragma: no cover\n basedir = Conf.main.base_dir.get()\n filters = self.IO_REGISTRY.get_filters(IOAction.LOAD)\n with save_restore_stds():\n filenames, _filt = getopenfilenames(self, _(\"Open\"), basedir, filters)\n objs = []\n for filename in filenames:\n with qt_try_loadsave_file(self.parent(), filename, \"load\"):\n Conf.main.base_dir.set(filename)\n objs.append(self.open_object(filename))\n return objs\n\n def save_objects(self, filenames: list[str] | None = None) -> None:\n \"\"\"Save selected objects to file (signal/image).\n\n Args:\n filenames (list(str)): File names\n\n Returns:\n None\n \"\"\"\n objs = self.objview.get_sel_objects(include_groups=True)\n if filenames is None: # pragma: no cover\n filenames = [None] * len(objs)\n assert len(filenames) == len(objs)\n for index, obj in enumerate(objs):\n filename = filenames[index]\n self.save_object(obj, filename)\n\n def import_metadata_from_file(self, filename: str | None = None) -> None:\n \"\"\"Import metadata from file (JSON).\n\n Args:\n filename (str): File name\n\n Returns:\n None\n \"\"\"\n if filename is None: # pragma: no cover\n basedir = Conf.main.base_dir.get()\n with save_restore_stds():\n filename, _filter = getopenfilename(\n self, _(\"Import metadata\"), basedir, \"*.json\"\n )\n if filename:\n with qt_try_loadsave_file(self.parent(), filename, \"load\"):\n Conf.main.base_dir.set(filename)\n obj = self.objview.get_sel_objects(include_groups=True)[0]\n obj.import_metadata_from_file(filename)\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n def export_metadata_from_file(self, filename: str | None = None) -> None:\n \"\"\"Export metadata to file (JSON).\n\n Args:\n filename (str): File name\n\n Returns:\n None\n \"\"\"\n obj = self.objview.get_sel_objects(include_groups=True)[0]\n if filename is None: # pragma: no cover\n basedir = Conf.main.base_dir.get()\n with save_restore_stds():\n filename, _filt = getsavefilename(\n self, _(\"Export metadata\"), basedir, \"*.json\"\n )\n if filename:\n with qt_try_loadsave_file(self.parent(), filename, \"save\"):\n Conf.main.base_dir.set(filename)\n obj.export_metadata_to_file(filename)\n\n # ------Refreshing GUI--------------------------------------------------------------\n def selection_changed(self, update_items: bool = False) -> None:\n \"\"\"Object selection changed: update object properties, refresh plot and update\n object view.\n\n Args:\n update_items (bool): Update plot 
items (default: False)\n \"\"\"\n selected_objects = self.objview.get_sel_objects(include_groups=True)\n selected_groups = self.objview.get_sel_groups()\n self.objprop.update_properties_from(self.objview.get_current_object())\n self.acthandler.selected_objects_changed(selected_groups, selected_objects)\n self.SIG_REFRESH_PLOT.emit(\"selected\", update_items)\n\n def properties_changed(self) -> None:\n \"\"\"The properties 'Apply' button was clicked: update object properties,\n refresh plot and update object view.\"\"\"\n obj = self.objview.get_current_object()\n update_dataset(obj, self.objprop.properties.dataset)\n self.objview.update_item(obj.uuid)\n self.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n # ------Plotting data in modal dialogs----------------------------------------------\n def open_separate_view(self, oids: list[str] | None = None) -> QW.QDialog | None:\n \"\"\"\n Open separate view for visualizing selected objects\n\n Args:\n oids (list(str)): Object IDs\n\n Returns:\n QDialog instance\n \"\"\"\n title = _(\"Annotations\")\n if oids is None:\n oids = self.objview.get_sel_object_uuids(include_groups=True)\n obj = self.objmodel[oids[0]]\n dlg = self.create_new_dialog(oids, edit=True, name=\"new_window\")\n if dlg is None:\n return None\n width, height = self.DIALOGSIZE\n dlg.resize(width, height)\n mgr = dlg.get_manager()\n mgr.get_itemlist_panel().show()\n toolbar = QW.QToolBar(title, self)\n dlg.button_layout.insertWidget(0, toolbar)\n # dlg.layout().insertWidget(1, toolbar) # other possible location\n # dlg.plot_layout.addWidget(toolbar, 1, 0, 1, 1) # other possible location\n mgr.add_toolbar(toolbar, id(toolbar))\n toolbar.setToolButtonStyle(QC.Qt.ToolButtonTextUnderIcon)\n for tool in self.ANNOTATION_TOOLS:\n mgr.add_tool(tool, toolbar_id=id(toolbar))\n plot = dlg.get_plot()\n plot.unselect_all()\n for item in plot.items:\n item.set_selectable(False)\n for item in obj.iterate_shape_items(editable=True):\n plot.add_item(item)\n self.__separate_views[dlg] = obj\n dlg.show()\n dlg.finished.connect(self.__separate_view_finished)\n return dlg\n\n def __separate_view_finished(self, result: int) -> None:\n \"\"\"Separate view was closed\n\n Args:\n result: result\n \"\"\"\n dlg: PlotDialog = self.sender()\n if result == QW.QDialog.DialogCode.Accepted:\n rw_items = []\n for item in dlg.get_plot().get_items():\n if not item.is_readonly() and is_plot_item_serializable(item):\n rw_items.append(item)\n obj = self.__separate_views[dlg]\n obj.annotations = items_to_json(rw_items)\n self.selection_changed(update_items=True)\n self.__separate_views.pop(dlg)\n dlg.deleteLater()\n\n def manual_refresh(self) -> None:\n \"\"\"Manual refresh\"\"\"\n self.plothandler.refresh_plot(\"selected\", True, force=True)\n\n def create_new_dialog(\n self,\n oids: list[str],\n edit: bool = False,\n toolbar: bool = True,\n title: str | None = None,\n tools: list[GuiTool] | None = None,\n name: str | None = None,\n options: dict | None = None,\n ) -> PlotDialog | None:\n \"\"\"Create new pop-up signal/image plot dialog.\n\n Args:\n oids (list(str)): Object IDs\n edit (bool): Edit mode\n toolbar (bool): Show toolbar\n title (str): Dialog title\n tools (list(GuiTool)): list of tools to add to the toolbar\n name (str): Dialog name\n options (dict): Plot options\n\n Returns:\n QDialog instance\n \"\"\"\n if title is not None or len(oids) == 1:\n if title is None:\n title = self.objview.get_sel_objects(include_groups=True)[0].title\n title = f\"{title} - {APP_NAME}\"\n else:\n title = APP_NAME\n\n plot_options 
= self.plothandler.get_current_plot_options()\n if options is not None:\n plot_options = plot_options.copy(options)\n\n # pylint: disable=not-callable\n dlg = PlotDialog(\n parent=self,\n title=title,\n edit=edit,\n options=plot_options,\n toolbar=toolbar,\n )\n dlg.setWindowIcon(get_icon(\"DataLab.svg\"))\n if tools is not None:\n for tool in tools:\n dlg.get_manager().add_tool(tool)\n plot = dlg.get_plot()\n\n objs = self.objmodel.get_objects(oids)\n dlg.setObjectName(f\"{objs[0].PREFIX}_{name}\")\n\n with create_progress_bar(\n self, _(\"Creating plot items\"), max_=len(objs)\n ) as progress:\n for index, obj in enumerate(objs):\n progress.setValue(index + 1)\n QW.QApplication.processEvents()\n if progress.wasCanceled():\n return None\n item = obj.make_item(update_from=self.plothandler[obj.uuid])\n item.set_readonly(True)\n plot.add_item(item, z=0)\n plot.set_active_item(item)\n plot.replot()\n return dlg\n\n def create_new_dialog_for_selection(\n self,\n title: str,\n name: str,\n options: dict[str, any] = None,\n toolbar: bool = False,\n tools: list[GuiTool] = None,\n ) -> tuple[QW.QDialog | None, SignalObj | ImageObj]:\n \"\"\"Create new pop-up dialog for the currently selected signal/image.\n\n Args:\n title (str): Dialog title\n name (str): Dialog name\n options (dict): Plot options\n toolbar (bool): Show toolbar\n tools (list(GuiTool)): list of tools to add to the toolbar\n\n Returns:\n QDialog instance, selected object\n \"\"\"\n obj = self.objview.get_sel_objects(include_groups=True)[0]\n dlg = self.create_new_dialog(\n [obj.uuid],\n edit=True,\n toolbar=toolbar,\n title=f\"{title} - {obj.title}\",\n tools=tools,\n name=name,\n options=options,\n )\n return dlg, obj\n\n def get_roi_dialog(\n self, extract: bool, singleobj: bool\n ) -> cdl.core.computation.base.ROIDataParam:\n \"\"\"Get ROI data (array) from specific dialog box.\n\n Args:\n extract (bool): Extract ROI from data\n singleobj (bool): Single object\n\n Returns:\n ROI data\n \"\"\"\n roi_s = _(\"Regions of interest\")\n options = self.ROIDIALOGOPTIONS\n dlg, obj = self.create_new_dialog_for_selection(roi_s, \"roi_dialog\", options)\n if dlg is None:\n return None\n plot = dlg.get_plot()\n plot.unselect_all()\n for item in plot.items:\n item.set_selectable(False)\n # pylint: disable=not-callable\n roi_editor = self.ROIDIALOGCLASS(dlg, obj, extract, singleobj)\n dlg.plot_layout.addWidget(roi_editor, 1, 0, 1, 1)\n if exec_dialog(dlg):\n return roi_editor.get_data()\n return None\n\n def get_object_with_dialog(\n self, title: str, parent: QW.QWidget | None = None\n ) -> SignalObj | ImageObj | None:\n \"\"\"Get object with dialog box.\n\n Args:\n title: Dialog title\n parent: Parent widget\n\n Returns:\n Object (signal or image, or None if dialog was canceled)\n \"\"\"\n parent = self if parent is None else parent\n dlg = objectview.GetObjectDialog(parent, self, title)\n if exec_dialog(dlg):\n obj_uuid = dlg.get_current_object_uuid()\n return self.objmodel[obj_uuid]\n return None\n\n def add_results_button(self) -> None:\n \"\"\"Add 'Show results' button\"\"\"\n btn = QW.QPushButton(get_icon(\"show_results.svg\"), _(\"Show results\"), self)\n btn.setToolTip(_(\"Show results obtained from previous computations\"))\n self.objprop.add_button(btn)\n btn.clicked.connect(self.show_results)\n self.acthandler.add_action(\n btn,\n select_condition=actionhandler.SelectCond.at_least_one,\n )\n\n def show_results(self) -> None:\n \"\"\"Show results\"\"\"\n\n @dataclasses.dataclass\n class ResultData:\n \"\"\"Result data associated to 
a shapetype\"\"\"\n\n            results: list[ResultShape] = None\n            xlabels: list[str] = None\n            ylabels: list[str] = None\n\n        rdatadict: dict[ShapeTypes, ResultData] = {}\n        objs = self.objview.get_sel_objects(include_groups=True)\n        for obj in objs:\n            for result in obj.iterate_resultshapes():\n                rdata = rdatadict.setdefault(result.shapetype, ResultData([], None, []))\n                title = f\"{result.label}\"\n                rdata.results.append(result)\n                rdata.xlabels = result.xlabels\n                for _i_row_res in range(result.array.shape[0]):\n                    ylabel = f\"{obj.short_id}: {result.label}\"\n                    rdata.ylabels.append(ylabel)\n        if rdatadict:\n            with warnings.catch_warnings():\n                warnings.simplefilter(\"ignore\", RuntimeWarning)\n                for rdata in rdatadict.values():\n                    dlg = ArrayEditor(self.parent())\n                    title = _(\"Results\")\n                    dlg.setup_and_check(\n                        np.vstack([result.array for result in rdata.results]),\n                        title,\n                        readonly=True,\n                        xlabels=rdata.xlabels,\n                        ylabels=rdata.ylabels,\n                    )\n                    dlg.setObjectName(f\"{objs[0].PREFIX}_results\")\n                    dlg.resize(750, 300)\n                    exec_dialog(dlg)\n        else:\n            msg = \"<br>\".join(\n                [\n                    _(\"No result currently available for this object.\"),\n                    \"\",\n                    _(\n                        \"This feature shows result arrays as displayed after \"\n                        'calling one of the computing features (see \"Compute\" menu).'\n                    ),\n                ]\n            )\n            QW.QMessageBox.information(self, APP_NAME, msg)\n\n    def add_label_with_title(self, title: str | None = None) -> None:\n        \"\"\"Add a label with object title on the associated plot\n\n        Args:\n            title (str | None): Label title. Defaults to None.\n                If None, the title is the object title.\n        \"\"\"\n        objs = self.objview.get_sel_objects(include_groups=True)\n        for obj in objs:\n            obj.add_label_with_title(title=title)\n        self.SIG_REFRESH_PLOT.emit(\"selected\", True)" }, { "identifier": "SignalPlotHandler", "path": "cdl/core/gui/plothandler.py", "snippet": "class SignalPlotHandler(BasePlotHandler):\n    \"\"\"Object handling signal plot items, plot dialogs, plot options\"\"\"\n\n    PLOT_TYPE = PlotType.CURVE\n\n    def toggle_anti_aliasing(self, state: bool) -> None:\n        \"\"\"Toggle anti-aliasing\n\n        Args:\n            state: if True, enable anti-aliasing\n        \"\"\"\n        self.plot.set_antialiasing(state)\n        self.plot.replot()" }, { "identifier": "SignalProcessor", "path": "cdl/core/gui/processor/signal.py", "snippet": "class SignalProcessor(BaseProcessor):\n    \"\"\"Object handling signal processing: operations, processing, computing\"\"\"\n\n    # pylint: disable=duplicate-code\n\n    @qt_try_except()\n    def compute_sum(self) -> None:\n        \"\"\"Compute sum\"\"\"\n        self.compute_n1(\"Σ\", cps.compute_add, title=_(\"Sum\"))\n\n    @qt_try_except()\n    def compute_average(self) -> None:\n        \"\"\"Compute average\"\"\"\n\n        def func_objs(new_obj: SignalObj, old_objs: list[SignalObj]) -> None:\n            \"\"\"Finalize average computation\"\"\"\n            new_obj.data = new_obj.data / float(len(old_objs))\n            if new_obj.dy is not None:\n                new_obj.dy = new_obj.dy / float(len(old_objs))\n\n        self.compute_n1(\"μ\", cps.compute_add, func_objs=func_objs, title=_(\"Average\"))\n\n    @qt_try_except()\n    def compute_product(self) -> None:\n        \"\"\"Compute product\"\"\"\n        self.compute_n1(\"Π\", cps.compute_product, title=_(\"Product\"))\n\n    @qt_try_except()\n    def compute_roi_extraction(\n        self, param: cdl.param.ROIDataParam | None = None\n    ) -> None:\n        \"\"\"Extract Region Of Interest (ROI) from data\"\"\"\n        param = self._get_roidataparam(param)\n        if param is None or param.is_empty:\n            return\n        obj = self.panel.objview.get_sel_objects()[0]\n        group = obj.roidata_to_params(param.roidata)\n        if param.singleobj:\n            
self.compute_11(cps.extract_multiple_roi, group, title=_(\"Extract ROI\"))\n else:\n self.compute_1n(cps.extract_single_roi, group.datasets, \"ROI\", edit=False)\n\n @qt_try_except()\n def compute_swap_axes(self) -> None:\n \"\"\"Swap data axes\"\"\"\n self.compute_11(cps.compute_swap_axes, title=_(\"Swap axes\"))\n\n @qt_try_except()\n def compute_abs(self) -> None:\n \"\"\"Compute absolute value\"\"\"\n self.compute_11(cps.compute_abs, title=_(\"Absolute value\"))\n\n @qt_try_except()\n def compute_re(self) -> None:\n \"\"\"Compute real part\"\"\"\n self.compute_11(cps.compute_re, title=_(\"Real part\"))\n\n @qt_try_except()\n def compute_im(self) -> None:\n \"\"\"Compute imaginary part\"\"\"\n self.compute_11(cps.compute_im, title=_(\"Imaginary part\"))\n\n @qt_try_except()\n def compute_astype(self, param: cdl.param.DataTypeSParam | None = None) -> None:\n \"\"\"Convert data type\"\"\"\n self.compute_11(\n cps.compute_astype, param, cps.DataTypeSParam, title=_(\"Convert data type\")\n )\n\n @qt_try_except()\n def compute_log10(self) -> None:\n \"\"\"Compute Log10\"\"\"\n self.compute_11(cps.compute_log10, title=\"Log10\")\n\n @qt_try_except()\n def compute_difference(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute difference between two signals\"\"\"\n self.compute_n1n(\n obj2,\n _(\"signal to subtract\"),\n cps.compute_difference,\n title=_(\"Difference\"),\n )\n\n @qt_try_except()\n def compute_quadratic_difference(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute quadratic difference between two signals\"\"\"\n self.compute_n1n(\n obj2,\n _(\"signal to subtract\"),\n cps.compute_quadratic_difference,\n title=_(\"Quadratic difference\"),\n )\n\n @qt_try_except()\n def compute_division(self, obj2: SignalObj | None = None) -> None:\n \"\"\"Compute division between two signals\"\"\"\n self.compute_n1n(\n obj2,\n _(\"divider\"),\n cps.compute_division,\n title=_(\"Division\"),\n )\n\n @qt_try_except()\n def compute_peak_detection(\n self, param: cdl.param.PeakDetectionParam | None = None\n ) -> None:\n \"\"\"Detect peaks from data\"\"\"\n obj = self.panel.objview.get_sel_objects()[0]\n edit, param = self.init_param(\n param, cps.PeakDetectionParam, _(\"Peak detection\")\n )\n if edit:\n dlg = signalpeakdialog.SignalPeakDetectionDialog(self.panel)\n dlg.setup_data(obj.x, obj.y)\n if exec_dialog(dlg):\n param.threshold = int(dlg.get_threshold() * 100)\n param.min_dist = dlg.get_min_dist()\n else:\n return\n self.compute_11(cps.compute_peak_detection, param)\n\n # ------Signal Processing\n @qt_try_except()\n def compute_normalize(self, param: cdl.param.NormalizeYParam | None = None) -> None:\n \"\"\"Normalize data\"\"\"\n self.compute_11(\n cps.compute_normalize, param, cps.NormalizeYParam, title=_(\"Normalize\")\n )\n\n @qt_try_except()\n def compute_derivative(self) -> None:\n \"\"\"Compute derivative\"\"\"\n self.compute_11(cps.compute_derivative, title=_(\"Derivative\"))\n\n @qt_try_except()\n def compute_integral(self) -> None:\n \"\"\"Compute integral\"\"\"\n self.compute_11(cps.compute_integral, title=_(\"Integral\"))\n\n @qt_try_except()\n def compute_calibration(\n self, param: cdl.param.XYCalibrateParam | None = None\n ) -> None:\n \"\"\"Compute data linear calibration\"\"\"\n self.compute_11(\n cps.compute_calibration,\n param,\n cps.XYCalibrateParam,\n title=_(\"Linear calibration\"),\n comment=\"y = a.x + b\",\n )\n\n @qt_try_except()\n def compute_threshold(self, param: cpb.ThresholdParam | None = None) -> None:\n \"\"\"Compute threshold 
clipping\"\"\"\n        self.compute_11(\n            cps.compute_threshold, param, cpb.ThresholdParam, title=_(\"Thresholding\")\n        )\n\n    @qt_try_except()\n    def compute_clip(self, param: cpb.ClipParam | None = None) -> None:\n        \"\"\"Compute maximum data clipping\"\"\"\n        self.compute_11(cps.compute_clip, param, cpb.ClipParam, title=_(\"Clipping\"))\n\n    @qt_try_except()\n    def compute_gaussian_filter(self, param: cpb.GaussianParam | None = None) -> None:\n        \"\"\"Compute gaussian filter\"\"\"\n        self.compute_11(\n            cps.compute_gaussian_filter,\n            param,\n            cpb.GaussianParam,\n            title=_(\"Gaussian filter\"),\n        )\n\n    @qt_try_except()\n    def compute_moving_average(\n        self, param: cpb.MovingAverageParam | None = None\n    ) -> None:\n        \"\"\"Compute moving average\"\"\"\n        self.compute_11(\n            cps.compute_moving_average,\n            param,\n            cpb.MovingAverageParam,\n            title=_(\"Moving average\"),\n        )\n\n    @qt_try_except()\n    def compute_moving_median(self, param: cpb.MovingMedianParam | None = None) -> None:\n        \"\"\"Compute moving median\"\"\"\n        self.compute_11(\n            cps.compute_moving_median,\n            param,\n            cpb.MovingMedianParam,\n            title=_(\"Moving median\"),\n        )\n\n    @qt_try_except()\n    def compute_wiener(self) -> None:\n        \"\"\"Compute Wiener filter\"\"\"\n        self.compute_11(cps.compute_wiener, title=_(\"Wiener filter\"))\n\n    @qt_try_except()\n    def compute_fft(self, param: cdl.param.FFTParam | None = None) -> None:\n        \"\"\"Compute FFT\"\"\"\n        if param is None:\n            param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())\n        self.compute_11(cps.compute_fft, param, title=_(\"FFT\"), edit=False)\n\n    @qt_try_except()\n    def compute_ifft(self, param: cdl.param.FFTParam | None = None) -> None:\n        \"\"\"Compute iFFT\"\"\"\n        if param is None:\n            param = cpb.FFTParam.create(shift=Conf.proc.fft_shift_enabled.get())\n        self.compute_11(cps.compute_ifft, param, title=_(\"iFFT\"), edit=False)\n\n    @qt_try_except()\n    def compute_interpolation(\n        self,\n        obj2: SignalObj | None = None,\n        param: cdl.param.InterpolationParam | None = None,\n    ):\n        \"\"\"Compute interpolation\"\"\"\n        self.compute_n1n(\n            obj2,\n            _(\"signal for X values\"),\n            cps.compute_interpolation,\n            param,\n            cps.InterpolationParam,\n            title=_(\"Interpolation\"),\n        )\n\n    @qt_try_except()\n    def compute_resampling(self, param: cdl.param.ResamplingParam | None = None):\n        \"\"\"Compute resampling\"\"\"\n        edit, param = self.init_param(param, cps.ResamplingParam, _(\"Resampling\"))\n        if edit:\n            obj = self.panel.objview.get_sel_objects()[0]\n            if param.xmin is None:\n                param.xmin = obj.x[0]\n            if param.xmax is None:\n                param.xmax = obj.x[-1]\n            if param.dx is None:\n                param.dx = obj.x[1] - obj.x[0]\n            if param.nbpts is None:\n                param.nbpts = len(obj.x)\n        self.compute_11(\n            cps.compute_resampling,\n            param,\n            cps.ResamplingParam,\n            title=_(\"Resampling\"),\n            edit=edit,\n        )\n\n    @qt_try_except()\n    def compute_detrending(self, param: cdl.param.DetrendingParam | None = None):\n        \"\"\"Compute detrending\"\"\"\n        self.compute_11(\n            cps.compute_detrending,\n            param,\n            cps.DetrendingParam,\n            title=_(\"Detrending\"),\n        )\n\n    @qt_try_except()\n    def compute_convolution(self, obj2: SignalObj | None = None) -> None:\n        \"\"\"Compute convolution\"\"\"\n        self.compute_n1n(\n            obj2,\n            _(\"signal to convolve with\"),\n            cps.compute_convolution,\n            title=_(\"Convolution\"),\n        )\n\n    @qt_try_except()\n    def compute_fit(self, name, fitdlgfunc):\n        \"\"\"Compute fitting curve\"\"\"\n        for obj in self.panel.objview.get_sel_objects():\n            self.__row_compute_fit(obj, name, fitdlgfunc)\n\n    @qt_try_except()\n    def compute_polyfit(\n        self, param: 
cdl.param.PolynomialFitParam | None = None\n ) -> None:\n \"\"\"Compute polynomial fitting curve\"\"\"\n txt = _(\"Polynomial fit\")\n edit, param = self.init_param(param, cps.PolynomialFitParam, txt)\n if not edit or param.edit(self.panel.parent()):\n dlgfunc = fitdialog.polynomialfit\n self.compute_fit(\n txt,\n lambda x, y, degree=param.degree, parent=self.panel.parent(): dlgfunc(\n x, y, degree, parent=parent\n ),\n )\n\n def __row_compute_fit(\n self, obj: SignalObj, name: str, fitdlgfunc: Callable\n ) -> None:\n \"\"\"Curve fitting computing sub-method\"\"\"\n output = fitdlgfunc(obj.x, obj.y, parent=self.panel.parent())\n if output is not None:\n y, params = output\n results = {}\n for param in params:\n if re.match(r\"[\\S\\_]*\\d{2}$\", param.name):\n shname = param.name[:-2]\n value = results.get(shname, np.array([]))\n results[shname] = np.array(list(value) + [param.value])\n else:\n results[param.name] = param.value\n # Creating new signal\n signal = create_signal(f\"{name}({obj.title})\", obj.x, y, metadata=results)\n # Creating new plot item\n self.panel.add_object(signal)\n\n @qt_try_except()\n def compute_multigaussianfit(self) -> None:\n \"\"\"Compute multi-Gaussian fitting curve\"\"\"\n fitdlgfunc = fitdialog.multigaussianfit\n for obj in self.panel.objview.get_sel_objects():\n dlg = signalpeakdialog.SignalPeakDetectionDialog(self.panel)\n dlg.setup_data(obj.x, obj.y)\n if exec_dialog(dlg):\n # Computing x, y\n peaks = dlg.get_peak_indexes()\n self.__row_compute_fit(\n obj,\n _(\"Multi-Gaussian fit\"),\n lambda x, y, peaks=peaks, parent=self.panel.parent(): fitdlgfunc(\n x, y, peaks, parent=parent\n ),\n )\n\n # ------Signal Computing\n @qt_try_except()\n def compute_fwhm(self, param: cdl.param.FWHMParam | None = None) -> None:\n \"\"\"Compute FWHM\"\"\"\n self.compute_10(\n cps.compute_fwhm, ShapeTypes.SEGMENT, param, cps.FWHMParam, title=_(\"FWHM\")\n )\n\n @qt_try_except()\n def compute_fw1e2(self) -> None:\n \"\"\"Compute FW at 1/e²\"\"\"\n self.compute_10(cps.compute_fw1e2, ShapeTypes.SEGMENT, title=_(\"FW\") + \"1/e²\")\n\n def _get_stat_funcs(self) -> list[tuple[str, Callable[[np.ndarray], float]]]:\n \"\"\"Return statistics functions list\"\"\"\n return [\n (\"min(y)\", lambda xy: xy[1].min()),\n (\"max(y)\", lambda xy: xy[1].max()),\n (\"<y>\", lambda xy: xy[1].mean()),\n (\"Median(y)\", lambda xy: np.median(xy[1])),\n (\"σ(y)\", lambda xy: xy[1].std()),\n (\"Σ(y)\", lambda xy: xy[1].sum()),\n (\"∫ydx\", lambda xy: np.trapz(xy[1], xy[0])),\n ]" }, { "identifier": "SignalIORegistry", "path": "cdl/core/io/signal/base.py", "snippet": "class SignalIORegistry(BaseIORegistry):\n \"\"\"Metaclass for registering signal I/O handler classes\"\"\"\n\n _io_format_instances: list[SignalFormatBase] = []" }, { "identifier": "SignalObj", "path": "cdl/core/model/signal.py", "snippet": "class SignalObj(gds.DataSet, base.BaseObj):\n \"\"\"Signal object\"\"\"\n\n PREFIX = \"s\"\n CONF_FMT = Conf.view.sig_format\n DEFAULT_FMT = \"g\"\n VALID_DTYPES = (np.float32, np.float64, np.complex128)\n\n uuid = gds.StringItem(\"UUID\").set_prop(\"display\", hide=True)\n\n _tabs = gds.BeginTabGroup(\"all\")\n\n _datag = gds.BeginGroup(_(\"Data and metadata\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n xydata = gds.FloatArrayItem(_(\"Data\"), transpose=True, minmax=\"rows\")\n metadata = gds.DictItem(_(\"Metadata\"), default={})\n _e_datag = gds.EndGroup(_(\"Data and metadata\"))\n\n _unitsg = gds.BeginGroup(_(\"Titles and units\"))\n title = 
gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n    _tabs_u = gds.BeginTabGroup(\"units\")\n    _unitsx = gds.BeginGroup(_(\"X-axis\"))\n    xlabel = gds.StringItem(_(\"Title\"), default=\"\")\n    xunit = gds.StringItem(_(\"Unit\"), default=\"\")\n    _e_unitsx = gds.EndGroup(_(\"X-axis\"))\n    _unitsy = gds.BeginGroup(_(\"Y-axis\"))\n    ylabel = gds.StringItem(_(\"Title\"), default=\"\")\n    yunit = gds.StringItem(_(\"Unit\"), default=\"\")\n    _e_unitsy = gds.EndGroup(_(\"Y-axis\"))\n    _e_tabs_u = gds.EndTabGroup(\"units\")\n    _e_unitsg = gds.EndGroup(_(\"Titles and units\"))\n\n    _e_tabs = gds.EndTabGroup(\"all\")\n\n    def __init__(self, title=None, comment=None, icon=\"\"):\n        \"\"\"Constructor\n\n        Args:\n            title (str): title\n            comment (str): comment\n            icon (str): icon\n        \"\"\"\n        gds.DataSet.__init__(self, title, comment, icon)\n        base.BaseObj.__init__(self)\n        self.regenerate_uuid()\n\n    def regenerate_uuid(self):\n        \"\"\"Regenerate UUID\n\n        This method is used to regenerate UUID after loading the object from a file.\n        This is required to avoid UUID conflicts when loading objects from file\n        without clearing the workspace first.\n        \"\"\"\n        self.uuid = str(uuid4())\n\n    def copy(\n        self, title: str | None = None, dtype: np.dtype | None = None\n    ) -> SignalObj:\n        \"\"\"Copy object.\n\n        Args:\n            title (str): title\n            dtype (numpy.dtype): data type\n\n        Returns:\n            SignalObj: copied object\n        \"\"\"\n        title = self.title if title is None else title\n        obj = SignalObj(title=title)\n        obj.title = title\n        if dtype not in (None, float, complex, np.complex128):\n            raise RuntimeError(\"Signal data only supports float64/complex128 dtype\")\n        obj.metadata = deepcopy(self.metadata)\n        obj.xydata = np.array(self.xydata, copy=True, dtype=dtype)\n        return obj\n\n    def set_data_type(self, dtype: np.dtype) -> None:  # pylint: disable=unused-argument\n        \"\"\"Change data type.\n\n        Args:\n            dtype (numpy.dtype): data type\n        \"\"\"\n        raise RuntimeError(\"Setting data type is not supported for signals\")\n\n    def set_xydata(\n        self,\n        x: np.ndarray | list,\n        y: np.ndarray | list,\n        dx: np.ndarray | list | None = None,\n        dy: np.ndarray | list | None = None,\n    ) -> None:\n        \"\"\"Set xy data\n\n        Args:\n            x (numpy.ndarray): x data\n            y (numpy.ndarray): y data\n            dx (numpy.ndarray): dx data (optional: error bars)\n            dy (numpy.ndarray): dy data (optional: error bars)\n        \"\"\"\n        if x is not None:\n            x = np.array(x)\n        if y is not None:\n            y = np.array(y)\n        if dx is not None:\n            dx = np.array(dx)\n        if dy is not None:\n            dy = np.array(dy)\n        if dx is None and dy is None:\n            self.xydata = np.vstack([x, y])\n        else:\n            if dx is None:\n                dx = np.zeros_like(dy)\n            if dy is None:\n                dy = np.zeros_like(dx)\n            self.xydata = np.vstack((x, y, dx, dy))\n\n    def __get_x(self) -> np.ndarray | None:\n        \"\"\"Get x data\"\"\"\n        if self.xydata is not None:\n            return self.xydata[0]\n        return None\n\n    def __set_x(self, data) -> None:\n        \"\"\"Set x data\"\"\"\n        self.xydata[0] = np.array(data)\n\n    def __get_y(self) -> np.ndarray | None:\n        \"\"\"Get y data\"\"\"\n        if self.xydata is not None:\n            return self.xydata[1]\n        return None\n\n    def __set_y(self, data) -> None:\n        \"\"\"Set y data\"\"\"\n        self.xydata[1] = np.array(data)\n\n    def __get_dx(self) -> np.ndarray | None:\n        \"\"\"Get dx data\"\"\"\n        if self.xydata is not None and len(self.xydata) > 2:\n            return self.xydata[2]\n        return None\n\n    def __set_dx(self, data) -> None:\n        \"\"\"Set dx data\"\"\"\n        if self.xydata is not None and len(self.xydata) > 2:\n            self.xydata[2] = np.array(data)\n        else:\n            raise ValueError(\"dx data not available\")\n\n    
def __get_dy(self) -> np.ndarray | None:\n        \"\"\"Get dy data\"\"\"\n        if self.xydata is not None and len(self.xydata) > 3:\n            return self.xydata[3]\n        return None\n\n    def __set_dy(self, data) -> None:\n        \"\"\"Set dy data\"\"\"\n        if self.xydata is not None and len(self.xydata) > 3:\n            self.xydata[3] = np.array(data)\n        else:\n            raise ValueError(\"dy data not available\")\n\n    x = property(__get_x, __set_x)\n    y = data = property(__get_y, __set_y)\n    dx = property(__get_dx, __set_dx)\n    dy = property(__get_dy, __set_dy)\n\n    def get_data(self, roi_index: int | None = None) -> np.ndarray:\n        \"\"\"\n        Return original data (if ROI is not defined or `roi_index` is None),\n        or ROI data (if both ROI and `roi_index` are defined).\n\n        Args:\n            roi_index (int): ROI index\n\n        Returns:\n            numpy.ndarray: data\n        \"\"\"\n        if self.roi is None or roi_index is None:\n            return self.x, self.y\n        i1, i2 = self.roi[roi_index, :]\n        return self.x[i1:i2], self.y[i1:i2]\n\n    def update_plot_item_parameters(self, item: CurveItem) -> None:\n        \"\"\"Update plot item parameters from object data/metadata\n\n        Takes into account a subset of plot item parameters. Those parameters may\n        have been overridden by object metadata entries or other object data. The goal\n        is to update the plot item accordingly.\n\n        This is *almost* the inverse operation of `update_metadata_from_plot_item`.\n\n        Args:\n            item: plot item\n        \"\"\"\n        update_dataset(item.param.line, self.metadata)\n        update_dataset(item.param.symbol, self.metadata)\n        super().update_plot_item_parameters(item)\n\n    def update_metadata_from_plot_item(self, item: CurveItem) -> None:\n        \"\"\"Update metadata from plot item.\n\n        Takes into account a subset of plot item parameters. Those parameters may\n        have been modified by the user through the plot item GUI. 
The goal is to\n update the metadata accordingly.\n\n This is *almost* the inverse operation of `update_plot_item_parameters`.\n\n Args:\n item: plot item\n \"\"\"\n super().update_metadata_from_plot_item(item)\n restore_dataset(item.param.line, self.metadata)\n restore_dataset(item.param.symbol, self.metadata)\n\n def make_item(self, update_from: CurveItem = None) -> CurveItem:\n \"\"\"Make plot item from data.\n\n Args:\n update_from (CurveItem): plot item to update from\n\n Returns:\n CurveItem: plot item\n \"\"\"\n if len(self.xydata) in (2, 3, 4):\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item = make.mcurve(x.real, y.real, label=self.title)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item = make.merror(x.real, y.real, dy.real, label=self.title)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item = make.merror(x.real, y.real, dx.real, dy.real, label=self.title)\n CurveStyles.apply_style(item.param)\n else:\n raise RuntimeError(\"data not supported\")\n if update_from is None:\n if execenv.demo_mode:\n item.param.line.width = 3\n self.update_plot_item_parameters(item)\n else:\n update_dataset(item.param, update_from.param)\n item.update_params()\n return item\n\n def update_item(self, item: CurveItem, data_changed: bool = True) -> None:\n \"\"\"Update plot item from data.\n\n Args:\n item (CurveItem): plot item\n data_changed (bool): if True, data has changed\n \"\"\"\n if data_changed:\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item.set_data(x.real, y.real)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item.set_data(x.real, y.real, dy=dy.real)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item.set_data(x.real, y.real, dx.real, dy.real)\n item.param.label = self.title\n self.update_plot_item_parameters(item)\n\n def roi_coords_to_indexes(self, coords: list) -> np.ndarray:\n \"\"\"Convert ROI coordinates to indexes.\n\n Args:\n coords (list): coordinates\n\n Returns:\n numpy.ndarray: indexes\n \"\"\"\n indexes = np.array(coords, int)\n for row in range(indexes.shape[0]):\n for col in range(indexes.shape[1]):\n x0 = coords[row][col]\n indexes[row, col] = np.abs(self.x - x0).argmin()\n return indexes\n\n def get_roi_param(self, title: str, *defaults) -> gds.DataSet:\n \"\"\"Return ROI parameters dataset.\n\n Args:\n title (str): title\n *defaults: default values\n \"\"\"\n imax = len(self.x) - 1\n i0, i1 = defaults\n param = ROIParam(title)\n param.col1 = i0\n param.col2 = i1\n param.set_global_prop(\"data\", min=-1, max=imax)\n return param\n\n @staticmethod\n def params_to_roidata(params: gds.DataSetGroup) -> np.ndarray:\n \"\"\"Convert ROI dataset group to ROI array data.\n\n Args:\n params (DataSetGroup): ROI dataset group\n\n Returns:\n numpy.ndarray: ROI array data\n \"\"\"\n roilist = []\n for roiparam in params.datasets:\n roiparam: ROIParam\n roilist.append([roiparam.col1, roiparam.col2])\n if len(roilist) == 0:\n return None\n return np.array(roilist, int)\n\n def new_roi_item(self, fmt: str, lbl: bool, editable: bool):\n \"\"\"Return a new ROI item from scratch\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n \"\"\"\n coords = self.x.min(), self.x.max()\n return base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n \"ROI\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n 
)\n\n def iterate_roi_items(self, fmt: str, lbl: bool, editable: bool = True):\n \"\"\"Make plot item representing a Region of Interest.\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n\n Yields:\n PlotItem: plot item\n \"\"\"\n if self.roi is not None:\n for index, coords in enumerate(self.x[self.roi]):\n yield base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n f\"ROI{index:02d}\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add label with title annotation\n\n Args:\n title (str): title (if None, use signal title)\n \"\"\"\n title = self.title if title is None else title\n if title:\n label = make.label(title, \"TL\", (0, 0), \"TL\")\n self.add_annotations_from_items([label])" }, { "identifier": "create_signal_from_param", "path": "cdl/core/model/signal.py", "snippet": "def create_signal_from_param(\n newparam: NewSignalParam,\n addparam: gds.DataSet | None = None,\n edit: bool = False,\n parent: QW.QWidget | None = None,\n) -> SignalObj | None:\n \"\"\"Create a new Signal object from a dialog box.\n\n Args:\n newparam (NewSignalParam): new signal parameters\n addparam (guidata.dataset.DataSet): additional parameters\n edit (bool): Open a dialog box to edit parameters (default: False)\n parent (QWidget): parent widget\n\n Returns:\n SignalObj: signal object or None if canceled\n \"\"\"\n global SIG_NB # pylint: disable=global-statement\n if newparam is None:\n newparam = new_signal_param()\n incr_sig_nb = not newparam.title\n if incr_sig_nb:\n newparam.title = f\"{newparam.title} {SIG_NB + 1:d}\"\n if not edit or addparam is not None or newparam.edit(parent=parent):\n prefix = newparam.stype.name.lower()\n if incr_sig_nb:\n SIG_NB += 1\n signal = create_signal(newparam.title)\n xarr = np.linspace(newparam.xmin, newparam.xmax, newparam.size)\n p = addparam\n if newparam.stype == SignalTypes.ZEROS:\n signal.set_xydata(xarr, np.zeros(newparam.size))\n elif newparam.stype in (SignalTypes.UNIFORMRANDOM, SignalTypes.NORMALRANDOM):\n pclass = {\n SignalTypes.UNIFORMRANDOM: base.UniformRandomParam,\n SignalTypes.NORMALRANDOM: base.NormalRandomParam,\n }[newparam.stype]\n if p is None:\n p = pclass(_(\"Signal\") + \" - \" + prefix)\n if edit and not p.edit(parent=parent):\n return None\n rng = np.random.default_rng(p.seed)\n if newparam.stype == SignalTypes.UNIFORMRANDOM:\n yarr = rng.random((newparam.size,)) * (p.vmax - p.vmin) + p.vmin\n if signal.title == DEFAULT_TITLE:\n signal.title = f\"{prefix}(vmin={p.vmin:.3g},vmax={p.vmax:.3g})\"\n elif newparam.stype == SignalTypes.NORMALRANDOM:\n yarr = rng.normal(p.mu, p.sigma, size=(newparam.size,))\n if signal.title == DEFAULT_TITLE:\n signal.title = f\"{prefix}(mu={p.mu:.3g},sigma={p.sigma:.3g})\"\n else:\n raise NotImplementedError(f\"New param type: {prefix}\")\n signal.set_xydata(xarr, yarr)\n elif newparam.stype in (\n SignalTypes.GAUSS,\n SignalTypes.LORENTZ,\n SignalTypes.VOIGT,\n ):\n func, title = {\n SignalTypes.GAUSS: (fit.GaussianModel.func, _(\"Gaussian\")),\n SignalTypes.LORENTZ: (fit.LorentzianModel.func, _(\"Lorentzian\")),\n SignalTypes.VOIGT: (fit.VoigtModel.func, \"Voigt\"),\n }[newparam.stype]\n if p is None:\n p = GaussLorentzVoigtParam(title)\n if edit and not p.edit(parent=parent):\n return None\n yarr = func(xarr, p.a, p.sigma, p.mu, p.ymin)\n signal.set_xydata(xarr, yarr)\n if signal.title == DEFAULT_TITLE:\n signal.title = (\n 
f\"{prefix}(a={p.a:.3g},sigma={p.sigma:.3g},\"\n f\"mu={p.mu:.3g},ymin={p.ymin:.3g})\"\n )\n elif newparam.stype in (\n SignalTypes.SINUS,\n SignalTypes.COSINUS,\n SignalTypes.SAWTOOTH,\n SignalTypes.TRIANGLE,\n SignalTypes.SQUARE,\n SignalTypes.SINC,\n ):\n func, title = {\n SignalTypes.SINUS: (np.sin, _(\"Sinusoid\")),\n SignalTypes.COSINUS: (np.cos, _(\"Sinusoid\")),\n SignalTypes.SAWTOOTH: (sps.sawtooth, _(\"Sawtooth function\")),\n SignalTypes.TRIANGLE: (triangle_func, _(\"Triangle function\")),\n SignalTypes.SQUARE: (sps.square, _(\"Square function\")),\n SignalTypes.SINC: (np.sinc, _(\"Cardinal sine\")),\n }[newparam.stype]\n if p is None:\n p = PeriodicParam(title)\n if edit and not p.edit(parent=parent):\n return None\n freq = p.get_frequency_in_hz()\n yarr = p.a * func(2 * np.pi * freq * xarr + np.deg2rad(p.phase)) + p.ymin\n signal.set_xydata(xarr, yarr)\n if signal.title == DEFAULT_TITLE:\n signal.title = (\n f\"{prefix}(f={p.freq:.3g} {p.freq_unit.value}),\"\n f\"a={p.a:.3g},ymin={p.ymin:.3g},phase={p.phase:.3g}°)\"\n )\n elif newparam.stype == SignalTypes.STEP:\n if p is None:\n p = StepParam(_(\"Step function\"))\n if edit and not p.edit(parent=parent):\n return None\n yarr = np.ones_like(xarr) * p.a1\n yarr[xarr > p.x0] = p.a2\n signal.set_xydata(xarr, yarr)\n if signal.title == DEFAULT_TITLE:\n signal.title = f\"{prefix}(x0={p.x0:.3g},a1={p.a1:.3g},a2={p.a2:.3g})\"\n return signal\n return None" }, { "identifier": "new_signal_param", "path": "cdl/core/model/signal.py", "snippet": "def new_signal_param(\n title: str | None = None,\n stype: str | None = None,\n xmin: float | None = None,\n xmax: float | None = None,\n size: int | None = None,\n) -> NewSignalParam:\n \"\"\"Create a new Signal dataset instance.\n\n Args:\n title (str): dataset title (default: None, uses default title)\n stype (str): signal type (default: None, uses default type)\n xmin (float): X min (default: None, uses default value)\n xmax (float): X max (default: None, uses default value)\n size (int): signal size (default: None, uses default value)\n\n Returns:\n NewSignalParam: new signal dataset instance\n \"\"\"\n title = DEFAULT_TITLE if title is None else title\n param = NewSignalParam(title=title, icon=get_icon(\"new_signal.svg\"))\n param.title = title\n if xmin is not None:\n param.xmin = xmin\n if xmax is not None:\n param.xmax = xmax\n if size is not None:\n param.size = size\n if stype is not None:\n param.stype = stype\n return param" } ]
from typing import TYPE_CHECKING from plotpy.tools import ( HCursorTool, HRangeTool, LabelTool, RectangleTool, SegmentTool, VCursorTool, XCursorTool, ) from cdl.config import _ from cdl.core.gui import roieditor from cdl.core.gui.actionhandler import SignalActionHandler from cdl.core.gui.panel.base import BaseDataPanel from cdl.core.gui.plothandler import SignalPlotHandler from cdl.core.gui.processor.signal import SignalProcessor from cdl.core.io.signal import SignalIORegistry from cdl.core.model.signal import SignalObj, create_signal_from_param, new_signal_param from plotpy.plot import PlotWidget from qtpy import QtWidgets as QW from cdl.core.model.signal import NewSignalParam import guidata.dataset as gds
19399
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """DataLab Signal Panel""" # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SignalPanel(BaseDataPanel): """Object handling the item list, the selected item properties and plot, specialized for Signal objects""" PANEL_STR = _("Signal panel") PARAMCLASS = SignalObj ANNOTATION_TOOLS = ( LabelTool, VCursorTool, HCursorTool, XCursorTool, SegmentTool, RectangleTool, HRangeTool, )
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """DataLab Signal Panel""" # pylint: disable=invalid-name # Allows short reference names like x, y, ... from __future__ import annotations if TYPE_CHECKING: # pragma: no cover class SignalPanel(BaseDataPanel): """Object handling the item list, the selected item properties and plot, specialized for Signal objects""" PANEL_STR = _("Signal panel") PARAMCLASS = SignalObj ANNOTATION_TOOLS = ( LabelTool, VCursorTool, HCursorTool, XCursorTool, SegmentTool, RectangleTool, HRangeTool, )
IO_REGISTRY = SignalIORegistry
6
2023-11-09 16:56:03+00:00
24k
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n self.fid_list = [torch.LongTensor(np.array([camera_dict['fid_%d' % idx]])) for idx in range(self.n_images)]\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n self.proj_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n self.proj_all.append(torch.from_numpy(P).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2), (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # [n_images, W, H, 3]\n \n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.proj_all = torch.stack(self.proj_all).to(self.device)\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = (self.fid_all / self.n_frames * 2) - 0.95\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = 
np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W*self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # 
batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()], dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n if self.near > 0:\n return self.near, self.far\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "BlenderDataset", "path": "models/dataset.py", "snippet": "class BlenderDataset:\n def __init__(self, conf):\n super(BlenderDataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n self.data_dir = conf.get_string('data_dir')\n splits = ['train']\n metas = {}\n for s in splits:\n with open(os.path.join(self.data_dir, 'transforms_{}.json'.format(s)), 'r') as fp:\n metas[s] = json.load(fp)\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'train/*.png')), key=lambda x: int(x.split('.')[0].split('_')[-1]))\n # if self.data_dir.split('/')[-2] == 'lego':\n # # self.images_lis = self.images_lis[1:]\n # self.images_lis.append('/data00/yzy/Git_Project/data/dynamic/D-NeRF/lego/val/r_0.png')\n all_imgs = []\n 
all_poses = []\n all_masks = []\n all_times = []\n counts = [0]\n for s in splits:\n meta = metas[s]\n\n imgs = []\n poses = []\n times = []\n\n for t, frame in enumerate(meta['frames']):\n fname = os.path.join(self.data_dir, frame['file_path'] + '.png')\n image = cv.imread(fname, cv.IMREAD_UNCHANGED)\n imgs.append(image)\n pose = np.array(frame['transform_matrix'])\n time = np.array([frame['time']])\n\n a = pose[:, 0:1]\n b = pose[:, 1:2]\n c = pose[:, 2:3]\n d = pose[:, 3:].copy()\n d[:3, :] *= 0.8\n\n pose = np.concatenate([a, -b, -c, d], 1)\n\n poses.append(pose)\n times.append(time)\n\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\n poses = np.array(poses).astype(np.float32)\n times = np.array(times).astype(np.float32)\n masks = (imgs[..., 3:] > 0).astype(np.float32)\n imgs = imgs[..., :3]\n counts.append(counts[-1] + imgs.shape[0])\n all_imgs.append(imgs)\n all_poses.append(poses)\n all_masks.append(masks)\n all_times.append(times)\n\n self.images = torch.from_numpy(np.concatenate(all_imgs, 0)).cpu()\n self.masks = torch.from_numpy(np.concatenate(all_masks, 0)).cpu()\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # no use\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2),\n (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.n_images = self.images.shape[0]\n\n self.fid_list = [torch.LongTensor(np.array([idx])) for idx in range(self.n_images)]\n # if self.data_dir.split('/')[-2] == 'lego':\n # self.fid_list[-1] = torch.LongTensor(np.array([0]))\n self.pose_all = torch.from_numpy(np.concatenate(all_poses, 0)).to(self.device)\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = torch.from_numpy(np.concatenate(all_times, 0)).to(self.device)\n\n self.H, self.W = self.images[0].shape[:2]\n self.image_pixels = self.H * self.W\n\n camera_angle_x = float(meta['camera_angle_x'])\n self.focal = .5 * self.W / np.tan(.5 * camera_angle_x)\n intrinsics = torch.Tensor(\n [[self.focal, 0, 0.5 * self.W, 0],\n [0, self.focal, 0.5 * self.H, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]).to(self.device)\n self.intrinsics_all = intrinsics.expand(self.n_images, -1, -1)\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.object_bbox_min = np.array([-1.01, -1.01, -1.01]) # hard code bbox\n self.object_bbox_max = np.array([1.01, 1.01, 1.01])\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = 
radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3],\n p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W * self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(\n self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()],\n dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, 
rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n if self.near > 0:\n return self.near, self.far\n a = torch.sum(rays_d ** 2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "RenderingNetwork", "path": "models/fields.py", "snippet": "class RenderingNetwork(nn.Module):\n def __init__(self,\n d_feature,\n mode,\n d_in,\n d_out,\n d_hidden,\n n_layers,\n weight_norm=True,\n multires_view=0,\n squeeze_out=True):\n super().__init__()\n\n self.mode = mode\n self.squeeze_out = squeeze_out\n dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n self.embedview_fn = None\n if multires_view > 0:\n embedview_fn, input_ch = get_embedder(multires_view)\n self.embedview_fn = embedview_fn\n dims[0] += (input_ch - 3)\n\n self.num_layers = len(dims)\n\n for l in range(0, self.num_layers - 1):\n out_dim = dims[l + 1]\n lin = nn.Linear(dims[l], out_dim)\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.relu = nn.ReLU()\n\n self.mask = -torch.ones((1, 1, 256, 256, 256)).float().cuda()\n \n\n def forward(self, points, normals, view_dirs, feature_vectors):\n if self.embedview_fn is not None:\n view_dirs = self.embedview_fn(view_dirs)\n\n rendering_input = NoOptionError\n\n if self.mode == 'idr':\n rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_view_dir':\n rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)\n elif self.mode == 'no_normal':\n rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)\n\n x = rendering_input\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.relu(x)\n\n if self.squeeze_out:\n x = torch.sigmoid(x)\n return x" }, { "identifier": "FieldNetwork", "path": "models/fields.py", "snippet": "class FieldNetwork(nn.Module):\n def __init__(self,\n d_in,\n d_out,\n d_hidden,\n d_t4d,\n min_emb,\n max_emb,\n n_layers,\n t_emb=-1,\n skip_in=(4,),\n bias=0.5,\n geometric_init=True,\n weight_norm=True):\n super(FieldNetwork, self).__init__()\n\n dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]\n dims[0] = d_in + (max_emb - min_emb)*3*2\n\n self.num_layers = len(dims)\n self.skip_in = skip_in\n self.min_emb = min_emb\n self.max_emb = max_emb\n self.t_emb = t_emb\n\n if t_emb > 0:\n embed_fn, time_input_ch = 
get_embedder(t_emb, input_dims=1)\n self.embed_fn = embed_fn\n dims[0] += time_input_ch\n\n for l in range(0, self.num_layers - 1):\n if l in self.skip_in:\n in_dim = dims[l] + dims[0] + d_t4d\n else:\n in_dim = dims[l]\n out_dim = dims[l+1]\n\n lin = nn.Linear(in_dim, out_dim)\n \n if geometric_init:\n if l == self.num_layers - 2:\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, -bias)\n elif l == 0:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.constant_(lin.weight[:, 3:], 0.0)\n torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))\n elif l in self.skip_in:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n torch.nn.init.constant_(lin.weight[:, -(dims[0] + d_t4d):], 0.0)\n else:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.activation = nn.Softplus(beta=100)\n\n def set_tensor4d(self, tensor4d):\n self.tensor4d = tensor4d\n\n def forward(self, mean, cov, fid, time_emb, reg_l2=False):\n cones_embedding = integrated_pos_enc((mean[:, None, :], cov[:, None, :]), self.min_emb, self.max_emb, diagonal=True).reshape(mean.shape[0], -1)\n inputs = mean\n tri_feat = self.tensor4d(inputs, fid, torch.mean(time_emb))\n\n if reg_l2:\n d_vec = F.normalize(torch.randn_like(inputs), dim=-1) * 1e-3\n d_tri_feat = self.tensor4d(inputs + d_vec, fid, torch.mean(time_emb))\n pred_reg_l2 = (d_tri_feat - tri_feat)**2\n \n xyz = inputs\n if self.t_emb > 0:\n time_input = self.embed_fn(time_emb)\n x = torch.cat([xyz, cones_embedding, time_input], 1)\n else:\n x = torch.cat([xyz, cones_embedding], 1)\n\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n \n if l in self.skip_in:\n if self.t_emb > 0:\n x = torch.cat([x, tri_feat, xyz, cones_embedding, time_input], 1) / np.sqrt(2)\n else:\n x = torch.cat([x, tri_feat, xyz, cones_embedding], 1) / np.sqrt(2)\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.activation(x)\n if reg_l2:\n return x, pred_reg_l2\n return x" }, { "identifier": "SingleVarianceNetwork", "path": "models/fields.py", "snippet": "class SingleVarianceNetwork(nn.Module):\n def __init__(self, init_val):\n super(SingleVarianceNetwork, self).__init__()\n init_tensor = torch.zeros(120)\n init_tensor[:] = init_val\n self.register_parameter('variance', nn.Parameter(init_tensor))\n\n def forward(self, x):\n return torch.ones([len(x), 1], device=x.device) * torch.exp(self.variance[0] * 10.0)" }, { "identifier": "Tensor4D", "path": "models/tensor4d.py", "snippet": "class Tensor4D(nn.Module):\n def __init__(self, feature_type, lr_resolution, hr_resolution, image_guide=False, image_guide_interval=2, image_guide_base=16) -> None:\n super(Tensor4D, self).__init__()\n \n self.data_dims = 0\n self.feature_type = feature_type\n if feature_type == '3d':\n self.feature_plane = SpacePlane(lr_resolution, hr_resolution)\n self.data_dims = self.feature_plane.dims\n elif feature_type == '4d':\n self.feature_plane = TimeSpacePlane(lr_resolution, hr_resolution)\n self.data_dims = self.feature_plane.dims\n\n self.img_dims = 0\n self.image_guide = image_guide\n if image_guide:\n self.conv_net = ConvNet(image_guide_base)\n self.img_dims = image_guide_base*8*2\n self.ig_interval = image_guide_interval\n\n if feature_type == '4d':\n self.compress_network 
= CompressNetwork(self.data_dims, self.data_dims // 3)\n self.compress_list = [self.compress_network.compress1, self.compress_network.compress2, self.compress_network.compress3]\n\n self.dims = self.data_dims + self.img_dims\n self.matMode = torch.BoolTensor([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).cuda()\n self.vecMode = torch.BoolTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).cuda()\n \n def get_data_parameters(self):\n return list(self.feature_plane.parameters())\n \n def get_network_parameters(self):\n params = []\n if self.feature_type == '4d':\n params += list(self.compress_network.parameters())\n if self.image_guide:\n params += list(self.conv_net.parameters())\n return params\n\n def set_images(self, image, proj):\n step = self.ig_interval\n select_proj = torch.cat([proj[i*step:i*step+1] for i in range(proj.shape[0] // step)], dim=0)\n self.proj = select_proj\n self.img_shape = image.shape\n select_image = torch.cat([image[i*step:i*step+1] for i in range(image.shape[0] // step)], dim=0)\n self.image_feature, self.image_feature_hr = self.conv_net(F.interpolate(select_image.permute(0, 3, 1, 2), size=(1024, 1024)))\n\n def forward(self, xyz_sampled_ori, fid, time_emb):\n sigma_feature_list = [] \n\n if self.image_guide:\n proj_pts = ((self.proj[:, :3, :3] @ xyz_sampled_ori.T.unsqueeze(0)) + self.proj[:, :3, 3:]).transpose(1, 2)\n proj_xy = proj_pts[:, :, :2] / (proj_pts[:, :, 2:] + 1e-6)\n B, H, W, C = self.img_shape\n proj_xy[:, :, 0] = (proj_xy[:, :, 0] - W / 2) / (W / 2)\n proj_xy[:, :, 1] = (proj_xy[:, :, 1] - H / 2) / (H / 2)\n N = self.image_feature.shape[0]\n img_feature = grid_sample(self.image_feature, proj_xy.reshape(N, -1, 1, 2)).reshape(N, -1, xyz_sampled_ori.shape[0])\n img_feature_cost = torch.sqrt(torch.sum((img_feature - torch.sum(img_feature, dim=0).unsqueeze(0) / N)**2, dim=0) / N + 1e-8)\n img_feature_max = torch.mean(img_feature, dim=0) + torch.max(img_feature, dim=0)[0]\n image_feature_hr = grid_sample(self.image_feature_hr, proj_xy.reshape(N, -1, 1, 2)).reshape(N, -1, xyz_sampled_ori.shape[0])\n image_feature_hr_cost = torch.sqrt(torch.sum((image_feature_hr - torch.sum(image_feature_hr, dim=0).unsqueeze(0) / N)**2, dim=0) / N + 1e-8)\n image_feature_hr_max = torch.mean(image_feature_hr, dim=0) + torch.max(image_feature_hr, dim=0)[0]\n sigma_feature_list = [img_feature_cost, img_feature_max, image_feature_hr_cost, image_feature_hr_max]\n \n xyz_sampled = xyz_sampled_ori\n scale = 1.0\n matMode = self.matMode\n coordinate_plane = torch.stack((xyz_sampled[..., matMode[0]] * scale, xyz_sampled[..., matMode[1]] * scale, xyz_sampled[..., matMode[2]] * scale)).view(3, -1, 1, 2)\n\n for idx_plane in range(3):\n sample_points = coordinate_plane[[idx_plane]]\n plane_coef_point = self.feature_plane.sample(sample_points, idx_plane, time_emb).view(-1, *xyz_sampled.shape[:1])\n if self.feature_type == '4d':\n plane_coef_point = self.compress_list[idx_plane](plane_coef_point.T).T\n sigma_feature_list.append(plane_coef_point)\n \n sigma_feature_list = torch.cat(sigma_feature_list, dim=0)\n # print(sigma_feature_list.shape)\n return sigma_feature_list.T" }, { "identifier": "NeuSRenderer", "path": "models/renderer.py", "snippet": "class NeuSRenderer:\n def __init__(self,\n sdf_network,\n deviation_network,\n color_network,\n mask3d,\n n_samples,\n n_importance,\n n_outside,\n up_sample_steps,\n perturb,\n reg_l2=False,\n mip_render=False,\n flow_network=None):\n \n self.sdf_network = sdf_network\n self.deviation_network = deviation_network\n self.color_network = color_network\n self.mask3d = 
mask3d\n self.n_samples = n_samples\n self.n_importance = n_importance\n self.n_outside = n_outside\n self.up_sample_steps = up_sample_steps\n self.perturb = perturb\n self.reg_l2 = reg_l2\n self.flow_network = flow_network\n self.mip_render = mip_render\n\n def mask_query_geometry(self, mean, cov, only_sdf=False):\n fid = self.fid\n time_emb = self.time_emb\n time_input = time_emb.expand(mean[:, :1].shape)\n space_time_input = torch.cat([mean, time_input], dim=-1)\n if not only_sdf:\n space_time_input.requires_grad_(True)\n inputs = space_time_input[:, :3]\n time_emb = space_time_input[:, 3:]\n N, _ = inputs.shape\n grads = torch.zeros((N, 4), device=inputs.device)\n sdf_nn = torch.zeros((N, 257), device=inputs.device)\n\n reg_l2 = torch.zeros((N, self.sdf_network.tensor4d.dims), device=inputs.device)\n grads[:, 0] = 1\n sdf_nn[:, 0] = -10\n\n mask = self.mask3d.valid_input(inputs, fid)\n if torch.sum(mask) == 0:\n results = {\n 'sdf_nn': sdf_nn,\n 'grads': grads[:, :3],\n 'time_grads': grads[:, 3:],\n 'pts_mask': mask,\n 'reg_l2': reg_l2\n }\n return results\n mask_mean = inputs[mask, :]\n mask_time_emb = time_emb[mask, :]\n mask_cov = cov[mask, :]\n \n if self.flow_network is not None:\n mask_cov = torch.zeros_like(mask_mean) # flow mode, disable mip_render\n if fid != 0:\n pred_flow = self.flow_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=False)\n mask_mean = mask_mean + pred_flow\n elif not self.mip_render:\n mask_cov = torch.zeros_like(mask_mean)\n\n if (not only_sdf) and self.reg_l2:\n pred_sdf_nn, pred_reg_l2 = self.sdf_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=True)\n reg_l2[mask] = pred_reg_l2\n else:\n pred_sdf_nn = self.sdf_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=False)\n\n if not only_sdf:\n pred_sdf = pred_sdf_nn[:, :1]\n d_output = torch.ones_like(pred_sdf, requires_grad=False, device=pred_sdf.device)\n gradients = torch.autograd.grad(\n outputs=pred_sdf,\n inputs=space_time_input,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n grads[mask] = gradients.reshape(-1, 4)[mask]\n \n sdf_nn[mask] = pred_sdf_nn\n results = {\n 'sdf_nn': sdf_nn,\n 'grads': grads[:, :3],\n 'time_grads': grads[:, 3:],\n 'pts_mask': mask,\n 'reg_l2': reg_l2\n }\n return results\n\n def mask_query_color(self, pts, mask, normals, view_dirs, features):\n N, _ = pts.shape\n out = torch.zeros((N, 3), device=pts.device)\n if torch.sum(mask) > 0:\n x = self.color_network(pts[mask], normals[mask], view_dirs[mask], features[mask])\n out[mask] = x\n return out\n else:\n return torch.zeros((N, 3), device=pts.device)\n\n def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_mask):\n \"\"\"\n Up sampling give a fixed inv_s\n \"\"\"\n batch_size, n_samples = z_vals.shape\n pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3\n radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)\n inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)\n sdf = sdf.reshape(batch_size, n_samples)\n prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]\n prev_mask, next_mask = pts_mask[:, :-1], pts_mask[:, 1:]\n mid_mask = torch.logical_and(prev_mask, next_mask)\n prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]\n mid_sdf = (prev_sdf + next_sdf) * 0.5\n cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)\n\n # ----------------------------------------------------------------------------------------------------------\n # Use min value of [ cos, prev_cos ]\n # 
Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more\n # robust when meeting situations like below:\n #\n # SDF\n # ^\n # |\\ -----x----...\n # | \\ /\n # | x x\n # |---\\----/-------------> 0 level\n # | \\ /\n # | \\/\n # |\n # ----------------------------------------------------------------------------------------------------------\n prev_cos_val = torch.cat([torch.zeros([batch_size, 1], device=sdf.device), cos_val[:, :-1]], dim=-1)\n cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)\n cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)\n cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere\n\n dist = (next_z_vals - prev_z_vals)\n prev_esti_sdf = mid_sdf - cos_val * dist * 0.5\n next_esti_sdf = mid_sdf + cos_val * dist * 0.5\n prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)\n next_cdf = torch.sigmoid(next_esti_sdf * inv_s)\n\n alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)\n alpha[~mid_mask] = 0\n alpha = alpha.clamp(0.0, 1.0)\n \n alpha = torch.cat([alpha, torch.zeros([batch_size, 1], device=alpha.device)], dim=-1)\n weights = alpha * torch.cumprod(\n torch.cat([torch.ones([batch_size, 1], device=alpha.device), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n\n z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()\n return z_samples\n\n def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, pts_mask, last=False):\n batch_size, n_samples = z_vals.shape\n _, n_importance = new_z_vals.shape\n pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]\n z_vals = torch.cat([z_vals, new_z_vals], dim=-1)\n z_vals, index = torch.sort(z_vals, dim=-1)\n if not last:\n new_sdf, new_pts_mask = self.sdf_network.sdf(pts.reshape(-1, 3), rt_mask=True)\n new_sdf = new_sdf.reshape(batch_size, n_importance)\n new_pts_mask = new_pts_mask.reshape(batch_size, n_importance)\n sdf = torch.cat([sdf, new_sdf], dim=-1)\n pts_mask = torch.cat([pts_mask, new_pts_mask], dim=-1)\n xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)\n index = index.reshape(-1)\n sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)\n pts_mask = pts_mask[(xx, index)].reshape(batch_size, n_samples + n_importance)\n\n return z_vals, sdf, pts_mask\n\n def render_core(self,\n rays_o,\n rays_d,\n rays_r,\n z_vals,\n sample_dist,\n background_alpha=None,\n background_sampled_color=None,\n background_rgb=None,\n cos_anneal_ratio=0.0):\n batch_size, n_samples = z_vals[:, :-1].shape\n\n # Section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n cat_dists = torch.cat([dists, torch.Tensor([sample_dist]).to(dists.device).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + cat_dists * 0.5\n\n cones = cast_rays(z_vals, rays_o, rays_d, rays_r, 'cone', diagonal=True)\n dirs = rays_d[:, None, :].expand(cones[0].shape)\n dirs = dirs.reshape(-1, 3)\n\n results = self.mask_query_geometry(cones[0].reshape(-1, 3), cones[1].reshape(-1, 3))\n sdf_nn_output, gradients, t_grads, pts_mask = results['sdf_nn'], results['grads'], results['time_grads'], results['pts_mask']\n sdf = sdf_nn_output[:, :1]\n feature_vector = sdf_nn_output[:, 1:]\n\n gradients = gradients.squeeze()\n sampled_color = self.mask_query_color(cones[0].reshape(-1, 3), pts_mask, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)\n \n inv_s = self.deviation_network(torch.zeros([1, 3], device=sdf.device))[:, :1].clip(1e-6, 1e6) # Single parameter\n inv_s = inv_s.expand(batch_size * n_samples, 1)\n\n true_cos = 
(dirs * gradients).sum(-1, keepdim=True)\n\n # \"cos_anneal_ratio\" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes\n # the cos value \"not dead\" at the beginning training iterations, for better convergence.\n iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +\n F.relu(-true_cos) * cos_anneal_ratio) # always non-positive\n\n # Estimate signed distances at section points\n estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5\n estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5\n\n prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)\n next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)\n\n p = prev_cdf - next_cdf\n c = prev_cdf\n\n alpha = ((p + 1e-5) / (c + 1e-5))\n \n alpha[~pts_mask] = 0\n alpha = alpha.reshape(batch_size, n_samples).clip(0.0, 1.0)\n \n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1], device=alpha.device), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n weights_sum = weights.sum(dim=-1, keepdim=True)\n \n color = (sampled_color * weights[:, :, None]).sum(dim=1)\n if background_rgb is not None: # Fixed background, usually black\n color = color + background_rgb * (1.0 - weights_sum)\n\n # Eikonal loss\n gradient_error = torch.mean((torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2,\n dim=-1) - 1.0) ** 2)\n time_grad_error = torch.mean(t_grads**2)\n return {\n 'color': color,\n 'sdf': sdf,\n 'pts_mask': pts_mask,\n 'dists': dists,\n 'gradients': gradients.reshape(batch_size, n_samples, 3),\n 's_val': 1.0 / inv_s,\n 'mid_z_vals': mid_z_vals,\n 'weights': weights,\n 'gradient_error': gradient_error,\n 'time_grad_error': time_grad_error,\n 'reg_l2': results['reg_l2'].reshape(batch_size, n_samples, -1),\n }\n\n def render(self, rays_o, rays_d, rays_r, near, far, fid, time_emb, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0):\n self.fid = fid\n self.time_emb = time_emb\n self.mask3d.set_fid(fid)\n\n batch_size = len(rays_o)\n sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere\n z_vals = torch.linspace(0.0, 1.0, self.n_samples, device=rays_o.device)\n z_vals = near + (far - near) * z_vals[None, :]\n\n z_vals_outside = None\n \n n_samples = self.n_samples\n perturb = self.perturb\n\n if perturb_overwrite >= 0:\n perturb = perturb_overwrite\n if perturb > 0:\n t_rand = (torch.rand([batch_size, 1], device=z_vals.device) - 0.5)\n z_vals = z_vals + t_rand * 2.0 / self.n_samples\n\n background_alpha = None\n background_sampled_color = None\n\n # Up sample\n if self.n_importance > 0:\n with torch.no_grad():\n cast_z_vals = torch.cat([z_vals, z_vals[:, -1:]], dim=1)\n cones = cast_rays(cast_z_vals, rays_o, rays_d, rays_r, 'cone', diagonal=True)\n results = self.mask_query_geometry(cones[0].reshape(-1, 3), cones[1].reshape(-1, 3), only_sdf=True)\n sdf, pts_mask = results['sdf_nn'][:, :1], results['pts_mask']\n # sdf, pts_mask = self.sdf_network.sdf(pts.reshape(-1, 3), rt_mask=True)\n sdf = sdf.reshape(batch_size, self.n_samples)\n pts_mask = pts_mask.reshape(batch_size, self.n_samples)\n for i in range(self.up_sample_steps):\n new_z_vals = self.up_sample(rays_o,\n rays_d,\n z_vals,\n sdf,\n self.n_importance // self.up_sample_steps + 1,\n 64 * 2**i, pts_mask)\n z_vals, sdf, pts_mask = self.cat_z_vals(rays_o,\n rays_d,\n z_vals,\n new_z_vals,\n sdf, pts_mask,\n last=(i + 1 == self.up_sample_steps))\n\n n_samples = self.n_samples + self.n_importance\n\n background_alpha = None\n background_sampled_color = None\n 
sample_dist = 1e-2\n\n # Render core\n ret_fine = self.render_core(rays_o,\n rays_d,\n rays_r,\n z_vals,\n sample_dist,\n background_rgb=background_rgb,\n background_alpha=background_alpha,\n background_sampled_color=background_sampled_color,\n cos_anneal_ratio=cos_anneal_ratio)\n\n\n return {\n 'color_fine': ret_fine['color'],\n 's_val': ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True),\n 'mid_z_vals': ret_fine['mid_z_vals'],\n 'weights': ret_fine['weights'],\n 'weight_sum': ret_fine['weights'].sum(dim=-1, keepdim=True),\n 'weight_max': torch.max(ret_fine['weights'], dim=-1, keepdim=True)[0],\n 'gradients': ret_fine['gradients'],\n 'gradient_error': ret_fine['gradient_error'],\n 'time_grad_error': ret_fine['time_grad_error'],\n 'reg_l2': ret_fine['reg_l2']\n }" }, { "identifier": "Mask3D", "path": "models/mask.py", "snippet": "class Mask3D:\n def __init__(self, mask_type, num_frames=None, mask_reso=None, device=None):\n self.mask_type = mask_type # 'bounding or visualhull'\n if mask_type == 'visualhull':\n self.R = mask_reso\n self.mask = torch.ones([num_frames, self.R, self.R, self.R]).float()\n self.device = device\n self.current_fid = -1\n self.current_mask = None\n\n def set_fid(self, fid):\n if fid != self.current_fid:\n self.current_fid = fid\n if self.mask_type == 'visualhull':\n self.current_mask = self.mask[fid.cpu()].to(self.device)\n \n def valid_input(self, pts, fid):\n with torch.no_grad():\n pts = pts.reshape(1, -1, 1, 1, 3)\n pts_max = torch.max(pts, dim=-1)[0]\n pts_min = torch.min(pts, dim=-1)[0]\n mask_max = (pts_max > 1).reshape(-1)\n mask_min = (pts_min < -1).reshape(-1)\n if self.mask_type == 'visualhull':\n R = self.R\n sigma = F.grid_sample(self.current_mask.view(1, 1, R, R, R), pts, mode='bilinear', padding_mode='border').reshape(-1)\n calc_mask = sigma < 0.05\n else:\n calc_mask = torch.ones_like(mask_max)\n calc_mask[mask_max] = 0\n calc_mask[mask_min] = 0\n return calc_mask\n\n def visualhull(self, pts_ori, projs, masks, g_nums):\n cam_nums = projs.shape[0]\n interval = 1\n pts_mask = torch.zeros(pts_ori.shape[0], g_nums)\n out_mask = torch.zeros(pts_ori.shape[0])\n N, H, W, C = masks.shape\n for gp in range(cam_nums // (g_nums*interval)):\n for j in range(g_nums):\n i = j + gp*(g_nums*interval)\n mask = masks[i, :, :, :1].permute(2, 0, 1).unsqueeze(0).clone()\n mask = torch.max_pool2d(mask, 7, 1, 3, 1)\n pts = torch.cat([pts_ori, torch.ones_like(pts_ori[:, :1])], dim=-1)\n pts = projs[i] @ pts.T\n pts = pts[:2] / pts[2:]\n pts[0] = pts[0] / W * 2 - 1\n pts[1] = pts[1] / H * 2 - 1\n pts = pts.T.reshape(1, -1, 1, 2)\n \n sample_mask = torch.nn.functional.grid_sample(mask, pts, mode='bilinear', padding_mode='zeros').reshape(-1)\n pts_mask[:, j] = sample_mask\n pts_mask_sum = torch.min(pts_mask, dim=1)[0]\n valid = pts_mask_sum > 0.1\n out_mask[valid] = -1\n if gp == 0:\n out_mask[~valid] = 1\n return out_mask\n\n def compute_image_mask(self, projs, masks, g_nums):\n N = 64\n R = self.R\n X = torch.linspace(-1, 1, R).split(N)\n Y = torch.linspace(-1, 1, R).split(N)\n Z = torch.linspace(-1, 1, R).split(N)\n cam_nums = projs.shape[0]\n \n self.mask = self.mask.to(self.device)\n for gp in tqdm(range(cam_nums // g_nums)):\n # for gp in range(1):\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = torch.meshgrid(xs, ys, zs)\n pts = torch.cat([zz.reshape(-1, 1), yy.reshape(-1, 1), xx.reshape(-1, 1)], dim=-1).to(self.device)\n val = self.visualhull(pts, 
projs[gp*g_nums:gp*g_nums+g_nums].to(self.device), masks[gp*g_nums:gp*g_nums+g_nums].to(self.device), g_nums).reshape(len(xs), len(ys), len(zs))\n self.mask[gp, xi * N: xi * N + len(xs),yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val\n self.mask = self.mask.unsqueeze(1)\n self.mask = -torch.max_pool3d(-self.mask, 7, 1, 3)\n self.mask[self.mask > -0.5] = 1\n self.mask = self.mask.detach().cpu()\n \n def compute_mask(self, fid, query_func, inv_s):\n N = 64\n R = 128\n X = torch.linspace(-1, 1, R).split(N)\n Y = torch.linspace(-1, 1, R).split(N)\n Z = torch.linspace(-1, 1, R).split(N)\n from .renderer import sigma_f\n mask = self.mask[fid].reshape(R, R, R).clone()\n self.triplane[0].flow(fid)\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = torch.meshgrid(xs, ys, zs)\n pts = torch.cat([zz.reshape(-1, 1), yy.reshape(-1, 1), xx.reshape(-1, 1)], dim=-1)\n val = sigma_f(query_func(pts), inv_s).reshape(len(xs), len(ys), len(zs))\n mask[xi * N: xi * N + len(xs),yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val\n valid = mask > 0.02\n mask[valid] = 1\n mask[~valid] = -1\n mask = -torch.max_pool3d(mask.reshape(1, 1, 128, 128, 128), 7, 1, 3)\n self.mask[fid][mask[0] > -0.5] = 1" } ]
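One formula from the NeuSRenderer.render_core snippet above is worth restating on its own: section SDF values are turned into opacities via alpha = clip((Phi(prev_sdf) - Phi(next_sdf)) / Phi(prev_sdf), 0, 1), with Phi(x) = sigmoid(inv_s * x). A minimal numeric sketch follows, with made-up SDF samples and a fixed inv_s (in the real code inv_s comes from SingleVarianceNetwork):

import torch

inv_s = torch.tensor(64.0)                      # stand-in for the learned 1/s
prev_sdf = torch.tensor([0.10, 0.02, -0.03])    # SDF at section entries (made up)
next_sdf = torch.tensor([0.02, -0.03, -0.08])   # SDF at section exits (made up)

prev_cdf = torch.sigmoid(prev_sdf * inv_s)
next_cdf = torch.sigmoid(next_sdf * inv_s)
alpha = ((prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)).clamp(0.0, 1.0)

# Volume-rendering weights: alpha_i times transmittance prod_{j<i}(1 - alpha_j),
# mirroring the cumprod line in render_core.
weights = alpha * torch.cumprod(
    torch.cat([torch.ones(1), 1.0 - alpha + 1e-7]), dim=0)[:-1]

The 1e-5 and 1e-7 terms only guard against division by zero and a vanishing transmittance product; they match the constants in the snippet.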
import os import time import logging import argparse import numpy as np import cv2 as cv import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from shutil import copyfile from tqdm import tqdm from pyhocon import ConfigFactory from models.dataset import Dataset, BlenderDataset from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork from models.tensor4d import Tensor4D from models.renderer import NeuSRenderer from models.mask import Mask3D from metrics import *
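The Dataset and BlenderDataset classes imported above both precompute a per-pixel cone radius in process_radius: the spacing dx between neighbouring ray directions, scaled by 2/sqrt(12) so the cone footprint sits between the circles inscribed in and circumscribed about a pixel (the mip-NeRF convention). A minimal self-contained sketch, assuming a hypothetical 3x3 pinhole intrinsics matrix K:

import numpy as np
import torch

H, W = 4, 4
K = torch.tensor([[100.0, 0.0, W / 2], [0.0, 100.0, H / 2], [0.0, 0.0, 1.0]])

tx = torch.linspace(0, W - 1, W)
ty = torch.linspace(0, H - 1, H)
px, py = torch.meshgrid(tx, ty, indexing="ij")
p = torch.stack([px, py, torch.ones_like(py)], dim=-1)            # W, H, 3
rays_v = (torch.inverse(K)[None, None] @ p[..., None]).squeeze(-1)  # W, H, 3

dx = torch.sqrt(((rays_v[:-1] - rays_v[1:]) ** 2).sum(-1))        # W-1, H
dx = torch.cat([dx, dx[-2:-1]], dim=0)                            # pad back to W rows
radii = dx[..., None] * 2 / np.sqrt(12)                           # W, H, 1

The dx[-2:-1] padding simply repeats one of the last rows of spacings so radii keeps the full W x H shape, as in the snippets.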
16196
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device)
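Runner.__init__ above is driven entirely by a HOCON file parsed with pyhocon, with a CASE_NAME placeholder substituted before parsing. A minimal sketch of the same pattern with a made-up inline config (the keys below are illustrative, not the project's full schema):

from pyhocon import ConfigFactory

conf_text = """
general { base_exp_dir = ./exp/CASE_NAME }
dataset { data_dir = ./data/CASE_NAME, is_blender = false }
train   { end_iter = 100000, learning_rate = 5e-4 }
"""
case = "lego"  # hypothetical case name
conf = ConfigFactory.parse_string(conf_text.replace('CASE_NAME', case))

print(conf['general.base_exp_dir'])             # ./exp/lego
print(conf['dataset'].get_bool('is_blender'))   # False
print(conf.get_int('train.end_iter'))           # 100000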
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False) self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset']) self.g_nums = self.conf['dataset']['g_nums'] self.iter_step = 0 self.flow = self.conf.get_bool('model.flow', default=False) # Training parameters self.end_iter = self.conf.get_int('train.end_iter') self.save_freq = self.conf.get_int('train.save_freq') self.report_freq = self.conf.get_int('train.report_freq') self.val_freq = self.conf.get_int('train.val_freq') self.batch_size = self.conf.get_int('train.batch_size') self.fine_level_iter = self.conf.get_int('train.fine_level_iter') self.downsample_iter = self.conf.get_int('train.downsample_iter') self.validate_resolution_level = self.conf.get_int('train.validate_resolution_level') self.learning_rate = self.conf.get_float('train.learning_rate') self.learning_rate_alpha = self.conf.get_float('train.learning_rate_alpha') self.use_white_bkgd = self.conf.get_bool('train.use_white_bkgd') self.warm_up_end = self.conf.get_float('train.warm_up_end', default=0.0) self.warm_up_imgs = self.conf.get_int('train.warm_up_imgs', default=50) self.anneal_end = self.conf.get_float('train.anneal_end', default=0.0) self.mask_color_loss = self.conf.get_bool('train.mask_color_loss') self.weighted_sample = self.conf.get_bool('train.weighted_sample') # Weights self.igr_weight = self.conf.get_float('train.igr_weight') self.tgr_weight = self.conf.get_float('train.tgr_weight') self.mask_weight = self.conf.get_float('train.mask_weight') self.tv_weight = self.conf.get_float('train.tv_weight') if self.tv_weight > 0: self.reg_l2 = True else: self.reg_l2 = False self.is_continue = is_continue self.mode = mode self.model_list = [] self.writer = None # Masks self.mask3d = Mask3D(**self.conf['model.mask3d'], num_frames=self.dataset.n_images // self.g_nums, device=self.device) # Networks self.tensor4d = Tensor4D(**self.conf['model.tensor4d']).to(self.device)
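Also worth isolating from the dataset snippets: gen_random_rays_at does not sample pixels uniformly but importance-samples 8x8 pixel blocks in proportion to a pooled error map, floored at 0.1 so no block is starved. A minimal sketch with a random stand-in error map:

import numpy as np
import torch

H = W = 64                                  # hypothetical image size
batch_size = 16
error = torch.rand(H // 8, W // 8)          # stand-in for self.errors[img_idx]

p = error.reshape(-1).numpy()
p = p / (p.max() + 1e-8)
p[p < 0.1] = 0.1                            # floor: every block keeps some mass
p = p / p.sum()

cells = np.random.choice(np.arange(p.size), size=batch_size, p=p)
# Each cell covers an 8x8 block; jitter inside the block for the final pixel.
pixels_y = torch.LongTensor(cells // (W // 8)) * 8 + torch.randint(0, 8, (batch_size,))
pixels_x = torch.LongTensor(cells % (W // 8)) * 8 + torch.randint(0, 8, (batch_size,))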
self.sdf_network = FieldNetwork(d_t4d=self.tensor4d.dims, **self.conf['model.sdf_network']).to(self.device)
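The gold completion above wires the Tensor4D feature width into FieldNetwork via d_t4d=self.tensor4d.dims; inside FieldNetwork that width reappears in every skip layer (in_dim = dims[l] + dims[0] + d_t4d). A toy bookkeeping check, with hypothetical sizes rather than the project's configured values:

d_in, d_hidden, d_t4d = 3, 256, 48
min_emb, max_emb = 0, 6
dims0 = d_in + (max_emb - min_emb) * 3 * 2   # xyz + integrated positional enc.
skip_in_dim = d_hidden + dims0 + d_t4d       # width seen by a skip-layer linear
print(dims0, skip_in_dim)                    # 39 343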
3
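For readers decoding the record layout: gold_snippet_index is a 0-based index into the context list above, so the 3 here selects the FieldNetwork snippet, which is exactly the class the next_line instantiates. A tiny sketch of that lookup over a trimmed-down copy of this record (only the first four context identifiers shown):

row = {
    "context": [{"identifier": "Dataset"}, {"identifier": "BlenderDataset"},
                {"identifier": "RenderingNetwork"}, {"identifier": "FieldNetwork"}],
    "next_line": "self.sdf_network = FieldNetwork(d_t4d=self.tensor4d.dims, "
                 "**self.conf['model.sdf_network']).to(self.device)",
    "gold_snippet_index": 3,
}
gold = row["context"][row["gold_snippet_index"]]
assert gold["identifier"] == "FieldNetwork"
assert gold["identifier"] in row["next_line"]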
2023-11-07 10:16:33+00:00
24k
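That closes this record. One plausible way to consume such a record for next-line prediction is to concatenate the retrieved snippets with the cropped file prefix and treat next_line as the target; the dump itself does not prescribe a prompting recipe, so the sketch below is an assumption, not the dataset's official format:

def build_prompt(row: dict):
    # Assumed recipe: retrieved context snippets, then the import block, then
    # the cropped file prefix; the model should emit row["next_line"] verbatim.
    context = "\n\n".join(c["snippet"] for c in row["context"])
    prompt = f"{context}\n\n{row['import_statement']}\n\n{row['cropped_code']}\n"
    return prompt, row["next_line"]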
Kushalhk/AutoFilter
utils.py
[ { "identifier": "AUTH_CHANNEL", "path": "info.py", "snippet": "AUTH_CHANNEL = int(auth_channel) if auth_channel and id_pattern.search(auth_channel) else None" }, { "identifier": "LONG_IMDB_DESCRIPTION", "path": "info.py", "snippet": "LONG_IMDB_DESCRIPTION = is_enabled(environ.get(\"LONG_IMDB_DESCRIPTION\", \"False\"), False)" }, { "identifier": "MAX_LIST_ELM", "path": "info.py", "snippet": "MAX_LIST_ELM = environ.get(\"MAX_LIST_ELM\", None)" }, { "identifier": "SHORTLINK_URL", "path": "info.py", "snippet": "SHORTLINK_URL = environ.get('SHORTLINK_URL', 'paisakamalo.in')" }, { "identifier": "SHORTLINK_API", "path": "info.py", "snippet": "SHORTLINK_API = environ.get('SHORTLINK_API', '16badb4cdfbd26689b88c28d4385b24b5ec85d81')" }, { "identifier": "IS_SHORTLINK", "path": "info.py", "snippet": "IS_SHORTLINK = bool(environ.get('IS_SHORTLINK', False))" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, { "identifier": "TUTORIAL", "path": "info.py", "snippet": "TUTORIAL = environ.get('TUTORIAL', 'https://t.me/TG_UPDATES1')" }, { "identifier": "GRP_LNK", "path": "info.py", "snippet": "GRP_LNK = environ.get('GRP_LNK', 'https://t.me/TG_SUPPORT_GROUP')" }, { "identifier": "CHNL_LNK", "path": "info.py", "snippet": "CHNL_LNK = environ.get('CHNL_LNK', 'https://t.me/TG_LINKS_CHANNEL')" }, { "identifier": "CUSTOM_FILE_CAPTION", "path": "info.py", "snippet": "CUSTOM_FILE_CAPTION = environ.get(\"CUSTOM_FILE_CAPTION\", f\"{script.CAPTION}\")" }, { "identifier": "SECOND_SHORTLINK_URL", "path": "info.py", "snippet": "SECOND_SHORTLINK_URL = environ.get('SECOND_SHORTLINK_URL', 'paisakamalo.in')" }, { "identifier": "SECOND_SHORTLINK_API", "path": "info.py", "snippet": "SECOND_SHORTLINK_API = environ.get('SECOND_SHORTLINK_API', '16badb4cdfbd26689b88c28d4385b24b5ec85d81')" }, { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\r\n START_TXT = \"\"\"<b>Hᴇʟʟᴏ 👋 {}</b>\r\n\r\n<b>Mʏ Nᴀᴍᴇ Is <a href=\"https://t.me/{}\">{}</a>, I Cᴀɴ Pʀᴏᴠɪᴅᴇ Mᴏᴠɪᴇs, Sᴇʀɪᴇs, Aɴɪᴍᴀᴛɪᴏɴ, Cᴀʀᴛᴏᴏɴ, Aɴɪᴍᴇ, K-Dʀᴀᴍᴀ & Mᴀɴʏ Mᴏʀᴇ ☺ Jᴜsᴛ Aᴅᴅ Mᴇ Tᴏ Yᴏᴜʀ Gʀᴏᴜᴘ As Aᴅᴍɪɴ EɴJᴏʏ 😍</b>\"\"\"\r\n\r\n HELP_TXT = \"\"\"<b>Hᴇʀᴇ Is Tʜᴇ Hᴇʟᴘ Fᴏʀ Mʏ Cᴏᴍᴍᴀɴᴅs.</b>\"\"\"\r\n \r\n ABOUT_TXT = \"\"\"\r\n<b>‣ ᴍʏ ɴᴀᴍᴇ : <a href=\"https://t.me/{}\">ʙᴏᴛ</a>\r\n‣ ᴄʀᴇᴀᴛᴏʀ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a>\r\n‣ ʟɪʙʀᴀʀʏ : <a href=\"https://pyrogram.org/\">ᴘʏʀᴏɢʀᴀᴍ</a>\r\n‣ ʟᴀɴɢᴜᴀɢᴇ : <a href=\"https://www.python.org/\">ᴘʏᴛʜᴏɴ</a>\r\n‣ ᴅᴀᴛᴀʙᴀꜱᴇ : <a href=\"https://www.mongodb.com/\">ᴍᴏɴɢᴏ ᴅʙ</a>\r\n‣ ʜᴏꜱᴛᴇᴅ ᴏɴ : <a href=\"https://render.com/\">Render</a>\r\n‣ ʙᴜɪʟᴅ ꜱᴛᴀᴛᴜꜱ : ᴠ.𝟹.𝟶 [ꜱᴛᴀʙʟᴇ]</b>\"\"\"\r\n \r\n DISCLAIMER_TXT = \"\"\"<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.\r\n\r\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪᴅᴅᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ꜱʜᴀʀᴇ ᴏʀ ᴄᴏɴꜱᴜᴍᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. 
ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ.\r\n\r\nᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a></b>\"\"\"\r\n\r\n SOURCE_TXT = \"\"\"\r\n<b>Hᴇʏ, Tʜɪs ɪs ᴀ Oᴘᴇɴ Sᴏᴜʀᴄᴇ Pʀᴏᴊᴇᴄᴛ.\r\n\r\nTʜɪs Bᴏᴛ ʜᴀs Lᴀᴛᴇsᴛ ᴀɴᴅ Aᴅᴠᴀɴᴄᴇᴅ Fᴇᴀᴛᴜʀᴇs⚡️\r\n\r\nFork our repository and give star ⭐- <a href='https://github.com/Kushalhk/AutoFilter'>📥 ᴄʟɪᴄᴋ ʜᴇʀᴇ 📥</a></b>\r\n\"\"\"\r\n \r\n KUSHAL_TXT = \"\"\" \r\n<b>🔥 ᴘʀᴇᴍɪᴜᴍ ғᴇᴀᴛᴜʀᴇs 🔥\r\n\r\n➻ ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪғʏ\r\n➻ ᴅɪʀᴇᴄᴛ ғɪʟᴇs\r\n➻ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ\r\n➻ ʜɪɢʜ-sᴘᴇᴇᴅ ᴅᴏᴡɴʟᴏᴀᴅ ʟɪɴᴋ\r\n➻ ᴜɴʟɪᴍɪᴛᴇᴅ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs\r\n➻ ғᴜʟʟ ᴀᴅᴍɪɴ sᴜᴘᴘᴏʀᴛ \r\n➻ ʀᴇǫᴜᴇsᴛ ᴡɪʟʟ ʙᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ɪɴ 𝟷ʜ ɪғ ᴀᴠᴀɪʟᴀʙʟᴇ\r\n\r\n‼️ ᴄʟɪᴄᴋ ᴏɴ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴ ᴛᴏ ᴄʜᴇᴄᴋ ᴀʟʟ ᴀᴠᴀɪʟᴀʙʟᴇ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴs ᴀɴᴅ ɪᴛ's ᴘʀɪᴄᴇs.</b>\"\"\"\r\n\r\n \r\n SETTINGS_TXT = \"\"\"\r\nHᴇʟᴘ : <b>Sᴇᴛᴛɪɴɢꜱ</b>\r\n \r\n◈ sᴇᴛᴛɪɴɢs ɪs ᴍᴏsᴛ ɪᴍᴘᴏʀᴛᴀɴᴛ ғᴇᴀᴛᴜʀᴇ ɪɴ ᴛʜɪs ʙᴏᴛ.\r\n◈ ʏᴏᴜ ᴄᴀɴ ᴇᴀsɪʟʏ ᴄᴜsᴛᴏᴍɪᴢᴇ ᴛʜɪs ʙᴏᴛ ғᴏʀ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴏɴʟʏ ɢʀᴏᴜᴘ ᴀᴅᴍɪɴ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ᴄᴏᴍᴍᴀɴᴅ ᴀɴᴅ ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs.\r\n2. ɪᴛ ᴡᴏʀᴋs ᴏɴʟʏ ᴡʜᴇɴ ʙᴏᴛ ᴀʟʀᴇᴀᴅʏ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /connect - ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ʙᴏᴛ\r\n• /settings - ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs ᴀs ʏᴏᴜʀ ᴡɪsʜ \"\"\"\r\n\r\n TELEGRAPH_TXT = \"\"\" Hᴇʟᴘ : <b>Tᴇʟᴇɢʀᴀᴘʜ</b>\r\n\r\n<b>Nᴏᴛᴇ</b>: ᴛʜɪꜱ ᴄᴏᴍᴍᴀɴᴅ ɪꜱ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ɢʀᴏᴜᴘꜱ ᴀɴᴅ ᴘᴍꜱ. ᴀʟꜱᴏ ᴄᴀɴ ʙᴇ ᴜꜱᴇ ʙʏ ᴇᴠᴇʀʏᴏɴᴇ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs & Usᴀɢᴇ :</b>\r\n• /telegraph - sᴇɴᴅ ᴍᴇ ᴘɪᴄᴛᴜʀᴇ ᴏʀ ᴠɪᴅᴇᴏ ᴜɴᴅᴇʀ 𝟻ᴍʙ\"\"\"\r\n\r\n FONT_TXT = \"\"\"Hᴇʟᴘ : <b>Fᴏɴᴛ</b>\r\n\r\n<b>Nᴏᴛᴇ</b>: ʏᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ᴍᴏᴅᴇ ᴛᴏ ᴄʜᴀɴɢᴇ ʏᴏᴜʀ ꜰᴏɴᴛꜱ ꜱᴛʏʟᴇ, ᴊᴜꜱᴛ ꜱᴇɴᴅ ᴍᴇ ʟɪᴋᴇ ᴛʜɪꜱ ꜰᴏʀᴍᴀᴛ. \r\n\r\n<code>/font TG_LINKS_CHANNEL</code>\"\"\"\r\n\r\n MANUELFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Fɪʟᴛᴇʀꜱ</b>\r\n \r\n◈ ꜰɪʟᴛᴇʀ ɪꜱ ᴀ ꜰᴇᴀᴛᴜʀᴇ ᴡᴇʀᴇ ᴜꜱᴇʀꜱ ᴄᴀɴ ꜱᴇᴛ ᴀᴜᴛᴏᴍᴀᴛᴇᴅ ʀᴇᴘʟɪᴇꜱ ꜰᴏʀ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴋᴇʏᴡᴏʀᴅ ᴀɴᴅ ɪ ᴡɪʟʟ ʀᴇꜱᴘᴏɴᴅ ᴡʜᴇɴᴇᴠᴇʀ ᴀ ᴋᴇʏᴡᴏʀᴅ ɪꜱ ꜰᴏᴜɴᴅ ɪɴ ᴛʜᴇ ᴍᴇꜱꜱᴀɢᴇ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴛʜɪꜱ ʙᴏᴛ ꜱʜᴏᴜʟᴅ ʜᴀᴠᴇ ᴀᴅᴍɪɴ ᴘʀɪᴠɪʟᴇɢᴇ.\r\n2. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ꜰɪʟᴛᴇʀꜱ ɪɴ ᴀ ᴄʜᴀᴛ.\r\n3. ᴀʟᴇʀᴛ ʙᴜᴛᴛᴏɴꜱ ʜᴀᴠᴇ ᴀ ʟɪᴍɪᴛ ᴏꜰ 64 ᴄʜᴀʀᴀᴄᴛᴇʀꜱ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /filter - ᴀᴅᴅ ᴀ ꜰɪʟᴛᴇʀ ɪɴ ᴀ ᴄʜᴀᴛ\r\n• /filters - ʟɪꜱᴛ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴛᴇʀꜱ ᴏꜰ ᴀ ᴄʜᴀᴛ\r\n• /del - ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴛᴇʀ ɪɴ ᴀ ᴄʜᴀᴛ\r\n• /delall - ᴅᴇʟᴇᴛᴇ ᴛʜᴇ ᴡʜᴏʟᴇ ꜰɪʟᴛᴇʀꜱ ɪɴ ᴀ ᴄʜᴀᴛ (ᴄʜᴀᴛ ᴏᴡɴᴇʀ ᴏɴʟʏ)\"\"\"\r\n\r\n BUTTON_TXT = \"\"\"Hᴇʟᴘ : <b>Bᴜᴛᴛᴏɴꜱ</b>\r\n \r\n◈ ᴛʜɪꜱ ʙᴏᴛ ꜱᴜᴘᴘᴏʀᴛꜱ ʙᴏᴛʜ ᴜʀʟ ᴀɴᴅ ᴀʟᴇʀᴛ ɪɴʟɪɴᴇ ʙᴜᴛᴛᴏɴꜱ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n𝟷. ᴛᴇʟᴇɢʀᴀᴍ ᴡɪʟʟ ɴᴏᴛ ᴀʟʟᴏᴡꜱ ʏᴏᴜ ᴛᴏ ꜱᴇɴᴅ ʙᴜᴛᴛᴏɴꜱ ᴡɪᴛʜᴏᴜᴛ ᴀɴʏ ᴄᴏɴᴛᴇɴᴛ, ꜱᴏ ᴄᴏɴᴛᴇɴᴛ ɪꜱ ᴍᴀɴᴅᴀᴛᴏʀʏ.\r\n𝟸. ᴛʜɪꜱ ʙᴏᴛ ꜱᴜᴘᴘᴏʀᴛꜱ ʙᴜᴛᴛᴏɴꜱ ᴡɪᴛʜ ᴀɴʏ ᴛᴇʟᴇɢʀᴀᴍ ᴍᴇᴅɪᴀ ᴛʏᴘᴇ.\r\n𝟹. ʙᴜᴛᴛᴏɴꜱ ꜱʜᴏᴜʟᴅ ʙᴇ ᴘʀᴏᴘᴇʀʟʏ ᴘᴀʀꜱᴇᴅ ᴀꜱ ᴍᴀʀᴋᴅᴏᴡɴ ꜰᴏʀᴍᴀᴛ\r\n\r\nᴜʀʟ ʙᴜᴛᴛᴏɴꜱ :\r\n<code>[Button Text](buttonurl:https://t.me/TG_LINKS_CHANNEL)</code>\r\n\r\nᴀʟᴇʀᴛ ʙᴜᴛᴛᴏɴꜱ :\r\n<code>[Button Text](buttonalert:ᴛʜɪꜱ ɪꜱ ᴀɴ ᴀʟᴇʀᴛ ᴍᴇꜱꜱᴀɢᴇ)</code>\"\"\"\r\n\r\n AUTOFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Aᴜᴛᴏ Fɪʟᴛᴇʀ</b>\r\n    \r\n<b>Nᴏᴛᴇ :</b> Fɪʟᴇ Iɴᴅᴇx\r\n𝟷. ᴍᴀᴋᴇ ᴍᴇ ᴛʜᴇ ᴀᴅᴍɪɴ ᴏꜰ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪꜰ ɪᴛ'ꜱ ᴘʀɪᴠᴀᴛᴇ.\r\n𝟸. ᴍᴀᴋᴇ ꜱᴜʀᴇ ᴛʜᴀᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ᴅᴏᴇꜱ ɴᴏᴛ ᴄᴏɴᴛᴀɪɴꜱ ᴄᴀᴍʀɪᴘꜱ, ᴘᴏʀɴ ᴀɴᴅ ꜰᴀᴋᴇ ꜰɪʟᴇꜱ.\r\n𝟹. ꜰᴏʀᴡᴀʀᴅ ᴛʜᴇ ʟᴀꜱᴛ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴍᴇ ᴡɪᴛʜ ǫᴜᴏᴛᴇꜱ. ɪ'ʟʟ ᴀᴅᴅ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜᴀᴛ ᴄʜᴀɴɴᴇʟ ᴛᴏ ᴍʏ ᴅʙ.\r\n\r\n<b>Nᴏᴛᴇ :</b> Aᴜᴛᴏ Fɪʟᴛᴇʀ\r\n𝟷. Aᴅᴅ ᴛʜᴇ ʙᴏᴛ ᴀs ᴀᴅᴍɪɴ ᴏɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n𝟸. Usᴇ /connect ᴀɴᴅ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ᴛʜᴇ ʙᴏᴛ.\r\n𝟹. 
Usᴇ /settings ᴏɴ ʙᴏᴛ's ᴘᴍ ᴀɴᴅ ᴛᴜʀɴ ᴏɴ AᴜᴛᴏFɪʟᴛᴇʀ ᴏɴ ᴛʜᴇ sᴇᴛᴛɪɴɢs ᴍᴇɴᴜ.\"\"\"\r\n\r\n \r\n RULE_TXT = \"\"\"♦ 𝗚𝗿𝗼𝘂𝗽 𝗥𝘂𝗹𝗲𝘀 ♦\r\n\r\n◈ <b>Sᴇᴀʀᴄʜ Mᴏᴠɪᴇ Wɪᴛʜ Cᴏʀʀᴇᴄᴛ Sᴘᴇʟʟɪɴɢ:</b>\r\n• ᴀᴠᴀᴛᴀʀ 𝟸𝟶𝟶𝟿 ✅\r\n• ᴀᴠᴀᴛᴀʀ ʜɪɴᴅɪ ✅\r\n• ᴀᴠᴀᴛᴀʀ ᴍᴏᴠɪᴇ ❌\r\n• ᴀᴠᴀᴛᴀʀ ʜɪɴᴅɪ ᴅᴜʙʙᴇᴅ..❌\r\n\r\n◈ <b>Sᴇᴀʀᴄʜ Wᴇʙ Sᴇʀɪᴇs Iɴ ᴛʜɪs Fᴏʀᴍᴀᴛ:</b>\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷E𝟶𝟷 ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ʜɪɴᴅɪ ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ʜɪɴᴅɪ ᴅᴜʙʙ... ❌\r\n• ᴠɪᴋɪɴɢs sᴇᴀsᴏɴ 𝟷 ❌\r\n• ᴠɪᴋɪɴɢs ᴡᴇʙ sᴇʀɪᴇs ❌\r\n\r\n<b>➙ ᴅᴏɴ'ᴛ ᴅᴏ ᴀɴʏ ꜱᴇʟꜰ ᴘʀᴏᴍᴏᴛɪᴏɴ. \r\n➙ ᴅᴏɴ'ᴛ ꜱᴇɴᴅ ᴀɴʏ ᴋɪɴᴅ ᴏꜰ ᴘʜᴏᴛᴏ, ᴠɪᴅᴇᴏ, ᴅᴏᴄᴜᴍᴇɴᴛꜱ, ᴜʀʟ, ᴇᴛᴄ...\r\n➙ ᴅᴏɴ'ᴛ ʀᴇǫᴜᴇꜱᴛ ᴀɴʏ ᴛʜɪɴɢꜱ ᴏᴛʜᴇʀ ᴛʜᴀɴ ᴍᴏᴠɪᴇꜱ, ꜱᴇʀɪᴇꜱ, ᴀɴɪᴍᴀᴛɪᴏɴ, ᴄᴀʀᴛᴏᴏɴ, ᴀɴɪᴍᴇ, ᴋ-ᴅʀᴀᴍᴀ ᴍᴀɴʏ ᴍᴏʀᴇ.</b>\r\n\r\n🔰 <b>Nᴏᴛᴇ :</b> ᴀʟʟ ᴍᴇꜱꜱᴀɢᴇꜱ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏ-ᴅᴇʟᴇᴛᴇᴅ ᴀꜰᴛᴇʀ 𝟷𝟶 ᴍɪɴᴜᴛᴇꜱ ᴛᴏ ᴀᴠᴏɪᴅ ᴄᴏᴘʏʀɪɢʜᴛ ɪꜱꜱᴜᴇꜱ.\"\"\"\r\n\r\n CONNECTION_TXT = \"\"\"Hᴇʟᴘ : <b>Cᴏɴɴᴇᴄᴛɪᴏɴꜱ</b>\r\n \r\n◈ ᴜꜱᴇᴅ ᴛᴏ ᴄᴏɴɴᴇᴄᴛ ʙᴏᴛ ᴛᴏ ᴘᴍ ꜰᴏʀ ᴍᴀɴᴀɢɪɴɢ ꜰɪʟᴛᴇʀꜱ \r\n◈ ɪᴛ ʜᴇʟᴘꜱ ᴛᴏ ᴀᴠᴏɪᴅ ꜱᴘᴀᴍᴍɪɴɢ ɪɴ ɢʀᴏᴜᴘꜱ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ᴀ ᴄᴏɴɴᴇᴄᴛɪᴏɴ.\r\n2. ꜱᴇɴᴅ /ᴄᴏɴɴᴇᴄᴛ ꜰᴏʀ ᴄᴏɴɴᴇᴄᴛɪɴɢ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /connect - ᴄᴏɴɴᴇᴄᴛ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴄʜᴀᴛ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\r\n• /disconnect - ᴅɪꜱᴄᴏɴɴᴇᴄᴛ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ\r\n• /connections - ʟɪꜱᴛ ᴀʟʟ ʏᴏᴜʀ ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ\"\"\"\r\n\r\n EXTRAMOD_TXT = \"\"\"Hᴇʟᴘ : <b>Exᴛʀᴀ Mᴏᴅᴜʟᴇs</b>\r\n \r\n<b>Nᴏᴛᴇ :</b>\r\nᴛʜᴇꜱᴇ ᴀʀᴇ ᴛʜᴇ ᴇxᴛʀᴀ ꜰᴇᴀᴛᴜʀᴇꜱ ᴏꜰ ᴛʜɪꜱ ʙᴏᴛ\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /id - ɢᴇᴛ ɪᴅ ᴏꜰ ᴀ ꜱᴘᴇᴄɪꜰɪᴇᴅ ᴜꜱᴇʀ.\r\n• /info - ɢᴇᴛ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ᴀʙᴏᴜᴛ ᴀ ᴜꜱᴇʀ.\r\n• /imdb - ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ɪᴍᴅʙ ꜱᴏᴜʀᴄᴇ.\r\n• /search - ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ᴠᴀʀɪᴏᴜꜱ ꜱᴏᴜʀᴄᴇꜱ.\"\"\"\r\n\r\n ADMIN_TXT = \"\"\"<b>Nᴏᴛᴇ :</b> Tʜɪs Mᴏᴅᴜʟᴇ Oɴʟʏ Wᴏʀᴋs Fᴏʀ Mʏ Aᴅᴍɪɴs.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /logs - ᴛᴏ ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ\r\n• /stats - ᴛᴏ ɢᴇᴛ ꜱᴛᴀᴛᴜꜱ ᴏꜰ ꜰɪʟᴇꜱ ɪɴ ᴅʙ. <b>[Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</b>\r\n• /delete - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.\r\n• /users - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.\r\n• /chats - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ\r\n• /leave - ᴛᴏ ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.\r\n• /disable - ᴛᴏ ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.\r\n• /ban - ᴛᴏ ʙᴀɴ ᴀ ᴜꜱᴇʀ.\r\n• /unban - ᴛᴏ ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.\r\n• /channel - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ. \r\n• /broadcast - ᴛᴏ ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ. \r\n• /grp_broadcast - Tᴏ ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.\r\n• /gfilter - ᴛᴏ ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs. \r\n• /gfilters - ᴛᴏ ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs. \r\n• /delg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ. \r\n• /request - ᴛᴏ sᴇɴᴅ ᴀ ᴍᴏᴠɪᴇ/sᴇʀɪᴇs ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ ʙᴏᴛ ᴀᴅᴍɪɴs. ᴏɴʟʏ ᴡᴏʀᴋs ᴏɴ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ. 
<b>[Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</b>\r\n• /delallg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.\r\n• /deletefiles - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴄᴀᴍʀɪᴘ ᴀɴᴅ ᴘʀᴇ-ᴅᴠᴅ ғɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.\"\"\"\r\n\r\n STICKER_TXT = \"\"\"<b>yᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ᴍᴏᴅᴜʟᴇ ᴛᴏ ꜰɪɴᴅᴀɴy ꜱᴛɪᴄᴋᴇʀꜱ ɪᴅ.\r\n• ᴜꜱᴀɢᴇ :ᴛᴏ ɢᴇᴛ ꜱᴛɪᴄᴋᴇʀ\r\n \r\n⭕ ʜᴏᴡ ᴛᴏ ᴜꜱᴇ\r\n◉ Reply To Any Sticker [/stickerid]\r\n\r\n/𝐬𝐭𝐢𝐜𝐤𝐞𝐫𝐢𝐝 𝐬𝐭𝐢𝐜𝐤𝐞𝐫 𝐢𝐝\r\n\r\n</b>\"\"\"\r\n \r\n STATUS_TXT = \"\"\"<b>⍟─────[ <b>Bᴏᴛ Sᴛᴀᴛᴜs</b> ]─────⍟\r\n    \r\n★ ᴛᴏᴛᴀʟ ꜰɪʟᴇꜱ : <code>{}</code>\r\n★ ᴛᴏᴛᴀʟ ᴜꜱᴇʀꜱ : <code>{}</code>\r\n★ ᴛᴏᴛᴀʟ ɢʀᴏᴜᴘꜱ : <code>{}</code>\r\n★ ᴜꜱᴇᴅ ꜱᴛᴏʀᴀɢᴇ: <code>{}</code>\r\n★ ꜰʀᴇᴇ ꜱᴛᴏʀᴀɢᴇ : <code>{}</code>\r\n\r\n•❅──────✧❅✦❅✧──────❅•</b>\"\"\"\r\n\r\n\r\n LOG_TEXT_G = \"\"\"<b>#NewGroup\r\nGʀᴏᴜᴘ = {}(<code>{}</code>)\r\nTᴏᴛᴀʟ Mᴇᴍʙᴇʀs = <code>{}</code>\r\nAᴅᴅᴇᴅ Bʏ - {}</b>\"\"\"\r\n\r\n LOG_TEXT_P = \"\"\"<b>#NewUser\r\nID - <code>{}</code>\r\nNᴀᴍᴇ - {}</b>\"\"\"\r\n\r\n ALRT_TXT = \"\"\"<b>ʜᴇʟʟᴏ {},\r\nᴛʜɪꜱ ɪꜱ ɴᴏᴛ ʏᴏᴜʀ ᴍᴏᴠɪᴇ ʀᴇQᴜᴇꜱᴛ,\r\nʀᴇǫᴜᴇꜱᴛ ʏᴏᴜʀ'ꜱ...</b>\"\"\"\r\n\r\n OLD_ALRT_TXT = \"\"\"<b>ʜᴇʏ {},\r\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴏɴᴇ ᴏꜰ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇꜱ, \r\nᴘʟᴇᴀꜱᴇ ꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇꜱᴛ ᴀɢᴀɪɴ.</b>\"\"\"\r\n\r\n CUDNT_FND = \"\"\"<b>ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏᴛʜɪɴɢ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}\r\nᴅɪᴅ ʏᴏᴜ ᴍᴇᴀɴ ᴀɴʏ ᴏɴᴇ ᴏꜰ ᴛʜᴇꜱᴇ?</b>\"\"\"\r\n\r\n I_CUDNT = \"\"\"<b>sᴏʀʀʏ ɴᴏ ꜰɪʟᴇs ᴡᴇʀᴇ ꜰᴏᴜɴᴅ ꜰᴏʀ ʏᴏᴜʀ ʀᴇǫᴜᴇꜱᴛ {} 😕\r\n\r\nMᴏᴠɪᴇs Nᴏᴛ Aᴠᴀɪʟᴀʙʟᴇ Rᴇᴀsᴏɴ:\r\n𝟷. ᴏ.ᴛ.ᴛ ᴏʀ ᴅᴠᴅ ɴᴏᴛ ʀᴇʟᴇᴀsᴇᴅ\r\n𝟸. ᴛʏᴘᴇ ɴᴀᴍᴇ ᴡɪᴛʜ ʏᴇᴀʀ\r\n𝟹. ᴍᴏᴠɪᴇ ɪs ɴᴏᴛ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ᴛʜᴇ ᴅᴀᴛᴀʙᴀsᴇ ʀᴇᴘᴏʀᴛ ᴛᴏ ᴀᴅᴍɪɴs @TG_Bots_Supporter</b>\"\"\"\r\n\r\n I_CUD_NT = \"\"\"<b>ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏ ᴍᴏᴠɪᴇ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}.\r\nᴘʟᴇᴀꜱᴇ ᴄʜᴇᴄᴋ ᴛʜᴇ ꜱᴘᴇʟʟɪɴɢ ᴏɴ ɢᴏᴏɢʟᴇ ᴏʀ ɪᴍᴅʙ...</b>\"\"\"\r\n\r\n MVE_NT_FND = \"\"\"<b>ᴍᴏᴠɪᴇ ɴᴏᴛ ꜰᴏᴜɴᴅ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ...</b>\"\"\"\r\n\r\n TOP_ALRT_MSG = \"\"\"<b>Cʜᴇᴄᴋɪɴɢ Fᴏʀ Mᴏᴠɪᴇ Iɴ Dᴀᴛᴀʙᴀsᴇ...</b>\"\"\"\r\n\r\n MELCOW_ENG = \"\"\"<b>Hᴇʟʟᴏ {} 😍, Aɴᴅ Wᴇʟᴄᴏᴍᴇ Tᴏ {} Gʀᴏᴜᴘ ❤️\r\n\r\n➻ ʜᴇʀᴇ ʏᴏᴜ ᴄᴀɴ ꜱᴇᴀʀᴄʜ ʏᴏᴜʀ ꜰᴀᴠᴏᴜʀɪᴛᴇ ᴍᴏᴠɪᴇꜱ ᴏʀ ꜱᴇʀɪᴇꜱ ʙʏ ᴊᴜꜱᴛ ᴛʏᴘɪɴɢ ɪᴛ'ꜱ ɴᴀᴍᴇ. 
\r\n\r\n⚠️ ɪꜰ ʏᴏᴜ ᴀʀᴇ ʜᴀᴠɪɴɢ ᴀɴʏ ᴘʀᴏʙʟᴇᴍ ʀᴇɢᴀʀᴅɪɴɢ ᴅᴏᴡɴʟᴏᴀᴅɪɴɢ ᴏʀ ꜱᴏᴍᴇᴛʜɪɴɢ ᴇʟꜱᴇ ᴛʜᴇɴ ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>\"\"\"\r\n \r\n REQINFO = \"\"\"\r\n⚠ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ⚠\r\n\r\nᴀꜰᴛᴇʀ 5 ᴍɪɴᴜᴛᴇꜱ ᴛʜɪꜱ ᴍᴇꜱꜱᴀɢᴇ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴅᴇʟᴇᴛᴇᴅ\r\n\r\nɪꜰ ʏᴏᴜ ᴅᴏ ɴᴏᴛ ꜱᴇᴇ ᴛʜᴇ ʀᴇǫᴜᴇsᴛᴇᴅ ᴍᴏᴠɪᴇ / sᴇʀɪᴇs ꜰɪʟᴇ, ʟᴏᴏᴋ ᴀᴛ ᴛʜᴇ ɴᴇxᴛ ᴘᴀɢᴇ\"\"\"\r\n\r\n \r\n\r\n SINFO = \"\"\"\r\n⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯\r\nꜱᴇʀɪᴇꜱ ʀᴇǫᴜᴇꜱᴛ ꜰᴏʀᴍᴀᴛ\r\n⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯\r\n\r\nɢᴏ ᴛᴏ ɢᴏᴏɢʟᴇ ➠ ᴛʏᴘᴇ ꜱᴇʀɪᴇꜱ ɴᴀᴍᴇ ➠ ᴄᴏᴘʏ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ➠ ᴘᴀꜱᴛᴇ ᴛʜɪꜱ ɢʀᴏᴜᴘ\r\n\r\nᴇxᴀᴍᴘʟᴇ : Loki S01E01\r\n\r\n🚯 ᴅᴏɴᴛ ᴜꜱᴇ ➠ ':(!,./)\"\"\"\r\n\r\n NORSLTS = \"\"\"\r\n★ #𝗡𝗼𝗥𝗲𝘀𝘂𝗹𝘁𝘀 ★\r\n\r\n𝗜𝗗 <b>: {}</b>\r\n\r\n𝗡𝗮𝗺𝗲 <b>: {}</b>\r\n\r\n𝗠𝗲𝘀𝘀𝗮𝗴𝗲 <b>: {}</b>🥲\"\"\"\r\n\r\n CAPTION = \"\"\" \r\n🗂 𝗙𝗶𝗹𝗲: <b><font class=smcp>{file_name}</font></b>\r\n📀 𝗦𝗶𝘇𝗲: <b><font class=smcp>{file_size}</font></b>\r\n\r\n<b>🔰 Cʀᴇᴀᴛᴏʀ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a>\r\n🔰 Cʜᴀɴɴᴇʟ : <a href=\"https://t.me/TG_LINKS_CHANNEL\">𝐌𝐎𝐕𝐈𝐄𝐒 𝐂𝐇𝐀𝐍𝐍𝐄𝐋</a>\r\n🔰 Gʀᴏᴜᴘ : <a href=\"https://t.me/movies_hub_official1\">𝐌𝐎𝐕𝐈𝐄 𝐑𝐄𝐐𝐔𝐄𝐒𝐓 𝐆𝐑𝐎𝐔𝐏</a></b>\"\"\"\r\n \r\n IMDB_TEMPLATE_TXT = \"\"\"\r\n<b>Query: {query}\r\nIMDb Data:\r\n\r\n🧿 𝐓𝐈𝐓𝐋𝐄: <a href={url}>{title}</a>\r\n🎭 𝐆𝐄𝐍𝐑𝐄𝐒: {genres}\r\n📆 𝐘𝐄𝐀𝐑: <a href={url}/releaseinfo>{year}</a>\r\n🌟 𝐑𝐀𝐓𝐈𝐍𝐆: <a href={url}/ratings>{rating}</a> / 10 (Based on {votes} user ratings)</b>\r\n☀️ 𝐋𝐀𝐍𝐆𝐔𝐀𝐆𝐄 : <code>{languages}</code></a>\r\n📀 𝐑𝐔𝐍𝐓𝐈𝐌𝐄: {runtime} Minutes</a>\r\n\r\n<b>👨‍💼 Requested by : {message.from_user.mention}</b>\"\"\"\r\n\r\n \r\n ALL_FILTERS = \"\"\"\r\n<b>Hᴇʏ {}, Tʜᴇsᴇ ᴀʀᴇ ᴍʏ ᴛʜʀᴇᴇ ᴛʏᴘᴇs ᴏғ ғɪʟᴛᴇʀs.</b>\"\"\"\r\n \r\n GFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs</b>\r\n \r\n◈ Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs ᴀʀᴇ ᴛʜᴇ ғɪʟᴛᴇʀs sᴇᴛ ʙʏ ʙᴏᴛ ᴀᴅᴍɪɴs ᴡʜɪᴄʜ ᴡɪʟʟ ᴡᴏʀᴋ ᴏɴ ᴀʟʟ ɢʀᴏᴜᴘs.\r\n \r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /gfilter - Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.\r\n• /gfilters - Tᴏ ᴠɪᴇᴡ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.\r\n• /delg - Tᴏ ᴅᴇʟᴇᴛᴇ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.\r\n• /delallg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢʟᴏʙᴀʟ ꜰɪʟᴛᴇʀꜱ.\"\"\"\r\n \r\n FILE_STORE_TXT = \"\"\"Hᴇʟᴘ : <b>Fɪʟᴇ Sᴛᴏʀᴇ</b>\r\n \r\n◈ Fɪʟᴇ sᴛᴏʀᴇ ɪs ᴛʜᴇ ғᴇᴀᴛᴜʀᴇ ᴡʜɪᴄʜ ᴡɪʟʟ ᴄʀᴇᴀᴛᴇ ᴀ sʜᴀʀᴇᴀʙʟᴇ ʟɪɴᴋ ᴏғ ᴀ sɪɴɢʟᴇ ᴏʀ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /batch - ᴛᴏ ᴄʀᴇᴀᴛᴇ ᴀ ʙᴀᴛᴄʜ ʟɪɴᴋ ᴏғ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.\r\n• /link - ᴛᴏ ᴄʀᴇᴀᴛᴇ ᴀ sɪɴɢʟᴇ ғɪʟᴇ sᴛᴏʀᴇ ʟɪɴᴋ.\r\n• /pbatch - ᴊᴜsᴛ ʟɪᴋᴇ <code>/batch</code>, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇs ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴs.\r\n• /plink - ᴊᴜsᴛ ʟɪᴋᴇ <code>/link</code>, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇ ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴ.\"\"\"\r\n\r\n CHECK_TXT = \"\"\"\r\n<b>🔥 ᴄʜᴏᴏsᴇ ʏᴏᴜʀ sᴜɪᴛᴀʙʟᴇ ᴘʟᴀɴ ᴀɴᴅ ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ғᴇᴇs ᴜsɪɴɢ ᴀɴʏ ᴜᴘɪ ᴀᴘᴘ. \r\n\r\nᴘʟᴀɴ ᴀ : 𝟷 ᴡᴇᴇᴋ / ₹𝟷𝟻\r\nᴘʟᴀɴ ʙ : 𝟷 ᴍᴏɴᴛʜ / ₹𝟹𝟿\r\nᴘʟᴀɴ ᴄ : 𝟷 ʏᴇᴀʀ / ₹𝟹𝟼𝟶\r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN1_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟷𝟻 ғᴏʀ 𝟷 ᴡᴇᴇᴋ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. \r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN2_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟹𝟿 ғᴏʀ 𝟷 ᴍᴏɴᴛʜ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. \r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN3_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟹𝟼𝟶 ғᴏʀ 𝟷 ʏᴇᴀʀ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. 
\r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n RESTART_TXT = \"\"\"\r\n<b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !\r\n\r\n📅 Dᴀᴛᴇ : <code>{}</code>\r\n⏰ Tɪᴍᴇ : <code>{}</code>\r\n🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code>\r\n🛠️ Bᴜɪʟᴅ Sᴛᴀᴛᴜs: <code>ᴠ𝟹.𝟶 [ Sᴛᴀʙʟᴇ ]</code></b>\"\"\"\r\n\r\n LOGO = \"\"\"\r\n ____ ___ ____ __ ____ ____ \r\n(_ _)/ __) ( _ \\ / \\(_ _)(__ )\r\n )( ( (_ \\ ) _ (( O ) )( / _/ \r\n (__) \\___/ (____/ \\__/ (__) (____)\"\"\"\r" }, { "identifier": "db", "path": "database/users_chats_db.py", "snippet": "class Database:\n def __init__(self, uri, database_name):\n def new_user(self, id, name):\n def new_group(self, id, title):\n async def add_user(self, id, name):\n async def is_user_exist(self, id):\n async def total_users_count(self):\n async def remove_ban(self, id):\n async def ban_user(self, user_id, ban_reason=\"No Reason\"):\n async def get_ban_status(self, id):\n async def get_all_users(self):\n async def delete_user(self, user_id):\n async def get_banned(self):\n async def add_chat(self, chat, title):\n async def get_chat(self, chat):\n async def re_enable_chat(self, id):\n async def update_settings(self, id, settings):\n async def get_settings(self, id):\n async def disable_chat(self, chat, reason=\"No Reason\"):\n async def total_chat_count(self):\n async def get_all_chats(self):\n async def get_db_size(self):" } ]
import logging import asyncio import pytz import random import re import os import string import requests import aiohttp import http.client import json from pyrogram.errors import InputUserDeactivated, UserNotParticipant, FloodWait, UserIsBlocked, MessageNotModified, PeerIdInvalid from info import AUTH_CHANNEL, LONG_IMDB_DESCRIPTION, MAX_LIST_ELM, SHORTLINK_URL, SHORTLINK_API, IS_SHORTLINK, LOG_CHANNEL, TUTORIAL, GRP_LNK, CHNL_LNK, CUSTOM_FILE_CAPTION, SECOND_SHORTLINK_URL, SECOND_SHORTLINK_API from imdb import Cinemagoer from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup from pyrogram import enums from typing import Union, List from Script import script from datetime import datetime, date from database.users_chats_db import db from bs4 import BeautifulSoup from shortzy import Shortzy
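This record's utility module wires the shortzy import to the SHORTLINK_URL and SHORTLINK_API values imported from info. A minimal sketch of that pattern, assuming shortzy's async Shortzy(api_key, base_site) / convert() API; the helper name and the two placeholder values are illustrative, not taken from the record:

import asyncio
from shortzy import Shortzy

SHORTLINK_URL = "example-shortener.com"  # placeholder for the value imported from info
SHORTLINK_API = "your-api-key"           # placeholder for the value imported from info

async def get_shortlink(link: str) -> str:
    # Shortzy talks to the configured shortener site and returns the shortened URL
    shortzy = Shortzy(api_key=SHORTLINK_API, base_site=SHORTLINK_URL)
    return await shortzy.convert(link)

if __name__ == "__main__":
    print(asyncio.run(get_shortlink("https://example.com/some/long/file/link")))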
19,308
logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) BTN_URL_REGEX = re.compile( r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))" ) imdb = Cinemagoer() TOKENS = {} VERIFIED = {} BANNED = {} SECOND_SHORTENER = {} SMART_OPEN = '“' SMART_CLOSE = '”' START_CHAR = ('\'', '"', SMART_OPEN) # temp db for banned class temp(object): BANNED_USERS = [] BANNED_CHATS = [] ME = None CURRENT=int(os.environ.get("SKIP", 2)) CANCEL = False MELCOW = {} U_NAME = None B_NAME = None GETALL = {} SHORT = {} SETTINGS = {} async def is_subscribed(bot, query): try:
logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) BTN_URL_REGEX = re.compile( r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))" ) imdb = Cinemagoer() TOKENS = {} VERIFIED = {} BANNED = {} SECOND_SHORTENER = {} SMART_OPEN = '“' SMART_CLOSE = '”' START_CHAR = ('\'', '"', SMART_OPEN) # temp db for banned class temp(object): BANNED_USERS = [] BANNED_CHATS = [] ME = None CURRENT=int(os.environ.get("SKIP", 2)) CANCEL = False MELCOW = {} U_NAME = None B_NAME = None GETALL = {} SHORT = {} SETTINGS = {} async def is_subscribed(bot, query): try:
user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
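This gold next_line opens the membership lookup inside is_subscribed; in pyrogram, get_chat_member raises UserNotParticipant when the user has not joined the channel. A hedged sketch of the standard force-subscribe check this line belongs to (the record's actual continuation may differ):

from pyrogram import enums
from pyrogram.errors import UserNotParticipant
from info import AUTH_CHANNEL

async def is_subscribed(bot, query):
    try:
        user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
    except UserNotParticipant:
        # the user never joined the required channel
        return False
    else:
        # treat banned members as not subscribed
        return user.status != enums.ChatMemberStatus.BANNED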
0
2023-11-03 12:21:26+00:00
24k
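The record above compiles BTN_URL_REGEX to parse the [text](buttonurl:link) button markup used in saved filter replies; the optional :same suffix marks a button that should share a row with the previous one. An illustrative check of what each capture group holds:

import re

BTN_URL_REGEX = re.compile(
    r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))"
)

match = BTN_URL_REGEX.search("[Updates](buttonurl:https://t.me/example:same)")
if match is not None:
    full, text, kind, url, same_row = match.groups()
    print(text)            # Updates
    print(kind)            # buttonurl
    print(url)             # https://t.me/example
    print(bool(same_row))  # True -> place this button on the same row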
apple/ml-reed
reed/algorithms/pebble.py
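The context snippets that follow implement preference-based reward learning for PEBBLE: a trajectory replay buffer feeds pairs of segments to an ensemble of reward networks, and p_hat_member scores a pair by softmaxing the summed predicted rewards of each segment (the "eqn 1" referenced in the snippet comments). An illustrative numpy sketch of that probability, not code from the repo:

import numpy as np

def preference_probability(rewards_one: np.ndarray, rewards_two: np.ndarray) -> float:
    # per-transition predicted rewards are summed into a return per segment
    returns = np.array([rewards_one.sum(), rewards_two.sum()])
    # softmax over the two returns; subtract the max for numerical stability
    exp_returns = np.exp(returns - returns.max())
    # index 0 is the probability that segment one is preferred
    return float(exp_returns[0] / exp_returns.sum())

print(preference_probability(np.array([0.5, 0.2]), np.array([0.1, 0.1])))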
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *args):\ndef soft_update_params(net, target_net, tau):\ndef set_seed_everywhere(seed):\ndef make_dir(*path_parts):\ndef weight_init(m):\n def __init__(self,\n input_dim,\n hidden_dim,\n output_dim,\n hidden_depth,\n output_mod=None):\n def forward(self, x):\n def __init__(self, cache_size=1):\n def atanh(x):\n def __eq__(self, other):\n def _call(self, x):\n def _inverse(self, y):\n def log_abs_det_jacobian(self, x, y):\n def __init__(self, loc, scale):\n def mean(self):\n def __init__(self, epsilon=1e-4, shape=(), device=None):\n def update(self, x):\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n def std(self):\ndef update_mean_var_count_from_moments(\n mean, var, count, batch_mean, batch_var, batch_count\n):\ndef mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):\ndef to_np(t):\nclass eval_mode(object):\nclass train_mode(object):\nclass MLP(nn.Module):\nclass TanhTransform(pyd.transforms.Transform):\nclass SquashedNormal(pyd.transformed_distribution.TransformedDistribution):\nclass TorchRunningMeanStd:\n M2 = m_a + m_b + torch.pow(delta, 2) * count * batch_count / tot_count" }, { "identifier": "Logger", "path": "BPref/logger.py", "snippet": "class Logger(object):\n def __init__(self,\n log_dir,\n save_tb=False,\n log_frequency=10000,\n agent='sac'):\n self._log_dir = log_dir\n self._log_frequency = log_frequency\n if save_tb:\n tb_dir = os.path.join(log_dir, 'tb')\n if os.path.exists(tb_dir):\n try:\n shutil.rmtree(tb_dir)\n except:\n print(\"logger.py warning: Unable to remove tb directory\")\n pass\n self._sw = SummaryWriter(tb_dir)\n else:\n self._sw = None\n # each agent has specific output format for training\n assert agent in AGENT_TRAIN_FORMAT\n train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]\n self._train_mg = MetersGroup(os.path.join(log_dir, 'train'),\n formating=train_format)\n self._eval_mg = MetersGroup(os.path.join(log_dir, 'eval'),\n formating=COMMON_EVAL_FORMAT)\n\n def _should_log(self, step, log_frequency):\n log_frequency = log_frequency or self._log_frequency\n return step % log_frequency == 0\n\n def _try_sw_log(self, key, value, step):\n if self._sw is not None:\n self._sw.add_scalar(key, value, step)\n\n def _try_sw_log_video(self, key, frames, step):\n if self._sw is not None:\n frames = torch.from_numpy(np.array(frames))\n frames = frames.unsqueeze(0)\n self._sw.add_video(key, frames, step, fps=30)\n\n def _try_sw_log_histogram(self, key, histogram, step):\n if self._sw is not None:\n self._sw.add_histogram(key, histogram, step)\n\n def log(self, key, value, step, n=1, log_frequency=1):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n\n def log_param(self, key, param, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n self.log_histogram(key + '_w', param.weight.data, step)\n if hasattr(param.weight, 'grad') and param.weight.grad is not None:\n 
self.log_histogram(key + '_w_g', param.weight.grad.data, step)\n if hasattr(param, 'bias') and hasattr(param.bias, 'data'):\n self.log_histogram(key + '_b', param.bias.data, step)\n if hasattr(param.bias, 'grad') and param.bias.grad is not None:\n self.log_histogram(key + '_b_g', param.bias.grad.data, step)\n\n def log_video(self, key, frames, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_video(key, frames, step)\n\n def log_histogram(self, key, histogram, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_histogram(key, histogram, step)\n\n def dump(self, step, save=True, ty=None):\n if ty is None:\n self._train_mg.dump(step, 'train', save)\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'eval':\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'train':\n self._train_mg.dump(step, 'train', save)\n else:\n raise f'invalid log type: {ty}'" }, { "identifier": "TrajectoryReplayBuffer", "path": "BPref/replay_buffer.py", "snippet": "class TrajectoryReplayBuffer:\n \"\"\"\n Buffer to store trajectories of environment transitions. Unlike ReplayBuffer, which stores all transitions in a\n flat manner, transitions are sorted by trajectory. Each trajectory corresponds to an episode.\n \"\"\"\n _RELABEL_BATCH_SIZE = 256\n\n def __init__(self, capacity: int, device: torch.device, window: int = 1, num_envs: t.Optional[int] = None,\n image_observations: t.Optional[t.Union[int, np.ndarray]] = None):\n \"\"\"\n Args:\n capacity: the number of trajectories to hold in memory\n device: the device sampled transitions should be put on\n window: no idea - part of the original code and is used in add_batch(...) which has not yet been refactored\n num_envs: the number of environment instances used to train the policy. Only needs to be specified when the\n number is >1. Some algorithms train on multiple instances of an environment at once, e.g. PPO.\n Not currently used, but not yet removed because we have not tested with an algorithm that needs\n multiple environment instances.\n image_observations: (default = false) whether to collect image observations in addition to state\n observations. 
This is helpful to use when the policy is trained on the state, but you\n want to visualize the trajectories or the reward model is trained on images.\n\n \"\"\"\n self.capacity = capacity\n self.device = device\n\n self.observations: t.Optional[np.ndarray] = None\n self.actions: t.Optional[np.ndarray] = None\n self.rewards: t.Optional[np.ndarray] = None\n self.not_dones: t.Optional[np.ndarray] = None\n self.not_dones_no_max: t.Optional[np.ndarray] = None\n self.trajectory_lengths: t.List = []\n self.window = window\n self.env_rewards: t.Optional[np.ndarray] = None\n self.image_observations: t.Optional[np.ndarray] = None\n # track whether to collect image observations - when not None, specifies the dimensions of the images\n self._collect_image_observations = image_observations\n\n # track the trajectories as a list of Trajectory\n self.trajectories: t.List[Trajectory] = []\n\n self.idx = 0\n self.last_save = 0\n self.full = False\n\n def __len__(self):\n return np.sum(self.trajectory_lengths) - len(self.trajectory_lengths)\n\n def __getitem__(self, flat_indx: t.Union[int, t.Tuple[int, int], t.List[int]]) -> TRANSITION:\n \"\"\"\n Get the transition at the given index\n\n Args:\n flat_indx: the index assuming transitions are stored flat instead of nested in trajectories\n - when an integer is specified, a single transition is retrieved\n - when a tuple of integers is given, a slice is retrieved as if the transitions are stored flat\n\n Returns:\n current observation\n action\n reward\n next observation\n whether the episode ended\n whether the episode ended without reaching max steps\n image version of current observation (optional)\n \"\"\"\n if isinstance(flat_indx, int) or isinstance(flat_indx, np.int64):\n traj_indx, trans_indx = self._flat_indx_to_trajectory_index(flat_indx)\n # check we are grabbing from a trajectory currently being accumulated\n # When the done signal is given, the current trajectory being accumulated is converted to a trajectory,\n # is added to the list of trajectories, and the values used to accumulate the next trajectory are set to\n # done. The next trajectory is not started until the call to add(...) after the done signal is received.\n # Therefore, we need to check whether the trajectory to pull from is actually the last completed trajectory\n # prior to starting a new trajectory. 
This is why we compare the length of the lists containing trajectory\n # lengths and the list containing the trajectories.\n if (traj_indx == len(self.trajectory_lengths) - 1\n and len(self.trajectory_lengths) > len(self.trajectories)):\n # we need to grab from the trajectory currently being populated\n return (self.observations[trans_indx].astype(np.float32), self.actions[trans_indx].astype(np.float32),\n self.rewards[trans_indx].astype(np.float32), self.observations[trans_indx + 1].astype(np.float32),\n self.not_dones[trans_indx].astype(np.float32),\n self.not_dones_no_max[trans_indx].astype(np.float32),\n (self.env_rewards[trans_indx].astype(np.float32)\n if self.env_rewards is not None\n else None),\n ((self.image_observations[trans_indx].astype(np.float32))\n if self.image_observations is not None\n else None),\n ((self.image_observations[trans_indx+1].astype(np.float32))\n if self.image_observations is not None\n else None))\n else:\n # grab from a previously completed trajectory\n transition: Transition = self.trajectories[traj_indx][trans_indx]\n return (transition.observation.astype(np.float32), transition.action.astype(np.float32),\n transition.reward.astype(np.float32), transition.next_observation.astype(np.float32),\n transition.not_done.astype(np.float32), transition.not_done_no_max.astype(np.float32),\n transition.env_reward.astype(np.float32),\n (transition.image_observation.astype(np.float32)\n if transition.image_observation is not None\n else None),\n (transition.next_image_observation.astype(np.float32)\n if transition.next_image_observation is not None\n else None))\n elif isinstance(flat_indx, t.List):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n not_dones = []\n not_dones_no_max = []\n env_rewards = []\n image_observations = []\n next_image_observations = []\n for indx in flat_indx:\n observation, action, reward, next_observation, not_done, not_done_no_max, env_reward, image_observation, next_image_observation = self[indx]\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n next_observations.append(next_observation)\n not_dones.append(not_done)\n not_dones_no_max.append(not_done_no_max)\n if env_reward is not None:\n env_rewards.append(env_reward)\n if image_observation is not None:\n image_observations.append(image_observation)\n if next_image_observation is not None:\n next_image_observations.append(next_image_observation)\n return (np.asarray(observations, dtype=np.float32), np.asarray(actions, dtype=np.float32),\n np.asarray(rewards, dtype=np.float32), np.asarray(next_observations, dtype=np.float32),\n np.asarray(not_dones, dtype=np.float32), np.asarray(not_dones_no_max, dtype=np.float32),\n (np.asarray(env_rewards, dtype=np.float32) if len(env_rewards) > 0 else None),\n (np.asarray(image_observations, dtype=np.float32) if self._collect_image_observations else None),\n (np.asarray(next_image_observations, dtype=np.float32) if self._collect_image_observations else None))\n else:\n # get the locations of the start and end transitions\n start_traj_indx, start_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[0])\n end_traj_indx, end_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[1])\n # check that we are not spanning trajectories\n if start_traj_indx == end_traj_indx:\n # grab the sub-trajectory\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n else:\n # grab what remains of the trajectory\n end_trans_indx = 
len(self.trajectories[start_traj_indx]) - 1\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n return (sub_trajectory.initial_observations,\n sub_trajectory.actions,\n sub_trajectory.rewards,\n sub_trajectory.next_observations,\n sub_trajectory.not_dones,\n sub_trajectory.not_dones_no_max,\n sub_trajectory.env_rewards,\n (sub_trajectory.initial_image_observations\n if sub_trajectory.initial_image_observations is not None\n else None),\n (sub_trajectory.next_image_observations\n if sub_trajectory.next_image_observations is not None\n else None))\n\n @property\n def trajectory_count(self) -> int:\n \"\"\"\n The number of trajectories in the buffer\n \"\"\"\n return len(self.trajectories)\n\n @property\n def all_not_dones(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.not_dones, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_rewards(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_environment_rewards(self) -> np.ndarray:\n \"\"\"\n Environment rewards from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_initial_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_next_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions,\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_initial_observations(self) -> np.ndarray:\n \"\"\"\n observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_next_observations(self) -> np.ndarray:\n \"\"\"\n Observations from the state-action pairs from all trajectories and all transitions\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_actions(self) -> np.ndarray:\n \"\"\"\n Actions from the state-action pairs from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.actions, axis=0) for traj in self.trajectories], axis=0)\n\n def _flat_indx_to_trajectory_index(self, flat_indx: int) -> t.Tuple[int, int]:\n \"\"\"\n Converts an index that assumes the transitions are flat to a trajectory and transition (w/in trajectory) index\n\n Args:\n flat_indx: the index assuming transitions are stored flat\n\n Returns:\n the index of the trajectory containing the transition\n the index of the transition within the trajectory\n 
\"\"\"\n # need to figure out which transition indices are stored in which trajectories\n transition_cumulative_sum = np.cumsum(self.trajectory_lengths)\n # the trajectory containing the transition is at the first index where the cumulative sum of transitions is\n # less than the transition index\n target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))\n # get the transition's index within the trajectory as the different between the flat index and the cumulative\n # sum at the previous trajectory - tells us how far into the target trajectory the transition is\n if target_trajectory_indx == 0:\n transition_trajectory_indx = flat_indx\n else:\n transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]\n return target_trajectory_indx, transition_trajectory_indx\n\n def _add_transition(self, observation: np.ndarray, action: np.ndarray, reward: float, done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None, image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Track the transition and update the length of the trajectory currently being accumulated\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.concatenate([self.observations, np.expand_dims(observation, axis=0)], axis=0)\n self.actions = np.concatenate([self.actions, np.expand_dims(action, axis=0)], axis=0)\n self.rewards = np.concatenate([self.rewards, np.asarray(reward).reshape(1, 1)], axis=0)\n if type(done) is float:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(not done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n else:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(~done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n\n self.trajectory_lengths[-1] += 1\n if env_reward is not None:\n self.env_rewards = np.concatenate([self.env_rewards,\n np.asarray(env_reward, dtype=np.float32).reshape(1, 1)], axis=0)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.concatenate([self.image_observations, np.expand_dims(image_observations, axis=0)], axis=0)\n\n def _start_trajectory(self, observation: np.ndarray,\n action: np.ndarray,\n reward: float,\n done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None,\n image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Start a new trajectory and track the transition\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n 
done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.expand_dims(observation, axis=0).astype(dtype=np.float32)\n self.actions = np.expand_dims(action, axis=0).astype(dtype=np.float32)\n self.rewards = np.asarray(reward, dtype=np.float32).reshape(1, 1)\n if type(done) is float:\n self.not_dones = np.asarray(not done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)\n else:\n self.not_dones = np.asarray(~done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)\n\n self.trajectory_lengths.append(1)\n\n if env_reward is not None:\n self.env_rewards = np.asarray(env_reward, dtype=np.float32).reshape(1, 1)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.expand_dims(image_observations, axis=0).astype(dtype=np.float32)\n\n def add(self, observation, action, reward, next_observation, done, done_no_max,\n env_reward: t.Optional[float] = None, image_observation: t.Optional[np.ndarray] = None,\n image_next_observation: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n next_observation: only used when an episode is completed to ensure the last observation is captured\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observation: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n image_next_observation: (optional) the image-based next observation -> should not be given when next_observation is also\n and image. 
This should be used when you want to accumulate the images separately from the\n trained policy.\n \"\"\"\n if self.observations is None:\n self._start_trajectory(observation, action, reward, done, done_no_max, env_reward, image_observation)\n elif done:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n # the episode has ended, so we need to track the next observation\n self.observations = np.concatenate([self.observations, np.expand_dims(next_observation, axis=0)], axis=0)\n if image_next_observation is not None:\n self.image_observations = np.concatenate([self.image_observations,\n np.expand_dims(image_next_observation, axis=0)], axis=0)\n # create the trajectory\n self.trajectories.append(Trajectory(self.observations.astype(dtype=np.float32),\n (self.image_observations.astype(dtype=np.float32)\n if self.image_observations is not None\n else None),\n actions=self.actions.astype(dtype=np.float32),\n rewards=self.rewards.astype(dtype=np.float32),\n not_dones=self.not_dones.astype(dtype=np.float32),\n not_dones_no_max=self.not_dones_no_max.astype(dtype=np.float32),\n env_rewards=self.env_rewards.astype(dtype=np.float32)))\n # check if the inclusion of the just completed trajectory puts the buffer at capacity\n # if it does, remove the first trajectory as this is a FIFO buffer\n if np.sum(self.trajectory_lengths) >= self.capacity:\n self.trajectories = self.trajectories[1:]\n self.trajectory_lengths = self.trajectory_lengths[1:]\n self.observations = None\n self.actions = None\n self.rewards = None\n self.not_dones = None\n self.not_dones_no_max = None\n self.env_rewards = None\n self.image_observations = None\n else:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n\n self.idx = (self.idx + 1) % self.capacity\n self.full = self.full or self.idx == 0\n\n def relabel_with_predictor(self, predictor, state_action_formatter: PreProcessInference):\n \"\"\"\n Relabel the rewards stored in the replay buffer using the given predictor\n\n Args:\n predictor: network that will consume state-action pairs and assign a reward\n state_action_formatter: formats the states and actions for consumption by the reward model\n \"\"\"\n print(\"Relabelling the replay buffer with the updated reward model.\")\n for trajectory in self.trajectories:\n # the number of batches to run through the model\n total_iter = int(len(trajectory) / self._RELABEL_BATCH_SIZE)\n # handle the case where we have more transitions than is evenly divisible by the batch size\n if len(trajectory) > self._RELABEL_BATCH_SIZE * total_iter:\n total_iter += 1\n # collect and process each batch to be passed through predictor\n for index in range(total_iter):\n start_indx = index * self._RELABEL_BATCH_SIZE\n # make sure we don't have an end index that is after the end of the trajectory\n end_indx = min((index + 1) * self._RELABEL_BATCH_SIZE, len(trajectory))\n\n # pull out the actions from the transitions that will be relabelled\n actions = trajectory.actions[start_indx:end_indx]\n # we need to handle the case where the reward model operates off of images\n if predictor.image_observations:\n observations = trajectory.all_image_observations[start_indx:end_indx]\n else:\n observations = trajectory.all_observations[start_indx:end_indx]\n formatted_state_action = state_action_formatter.format_state_action(observations, actions, batch_sa=True)\n pred_reward = predictor.r_hat_batch(formatted_state_action)\n # update the rewards assigned to the 
transitions\n trajectory.rewards[start_indx:end_indx] = pred_reward\n\n def sample(self, batch_size: int):\n indxs = list(np.random.randint(0, np.sum(self.trajectory_lengths) - 1, size=batch_size))\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations = self[indxs]\n observations = torch.as_tensor(observations, device=self.device).float()\n actions = torch.as_tensor(actions, device=self.device)\n rewards = torch.as_tensor(rewards, device=self.device)\n next_observations = torch.as_tensor(next_observations, device=self.device).float()\n not_dones = torch.as_tensor(not_dones, device=self.device)\n not_dones_no_max = torch.as_tensor(not_dones_no_max, device=self.device)\n env_rewards = torch.as_tensor(env_rewards, device=self.device)\n image_observations = (torch.as_tensor(image_observations, device=self.device).float() if self._collect_image_observations else None)\n next_image_observations = (torch.as_tensor(next_image_observations, device=self.device).float() if self._collect_image_observations else None)\n return observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations\n\n def sample_state_ent(self, batch_size: int):\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, _, _, _ = self.sample(batch_size)\n full_observation = torch.as_tensor(np.concatenate([traj.all_observations for traj in self.trajectories], axis=0),\n device=self.device)\n return observations, full_observation, actions, rewards, next_observations, not_dones, not_dones_no_max\n\n def save(self, out_directory: Path, env_id: str, step: int):\n \"\"\"\n Save the replay buffer to disk as a npz archive\n Args:\n out_directory: location where replay buffer will be saved\n env_id: the environment within which the data was generated\n step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(out_directory / f\"{env_id}_replay_buffer_{step}.zip\", \"w\")\n\n # write each trajectory file to disk and to the zip archive\n for traj_id, trajectory in enumerate(self.trajectories):\n trajectory.save(out_directory / f\"{traj_id}.npz\")\n zip_obj.write(out_directory / f\"{traj_id}.npz\")\n # close the Zip File\n zip_obj.close()\n\n @staticmethod\n def from_directory(directory_path: Path,\n device: torch.device = 'cuda') -> \"TrajectoryReplayBuffer\":\n \"\"\"\n Create a TrajectoryReplay buffer from a directory of npz archive trajectories\n\n Args:\n directory_path: the location of the npz_archive on disk\n device: the device sampled transitions should be pushed to\n Returns:\n populated trajectory replay buffer\n \"\"\"\n # accumulate the trajectories\n trajectories = []\n trajectory_lengths = []\n # determine how many transitions are in the replay buffer\n capacity = 0\n # load each trajectory from disk\n for traj_filename in directory_path.iterdir():\n # we only load data from npz archives, so we need to skip anything else\n if not traj_filename.suffix == \".npz\": continue\n # load the trajectory from disk\n traj = Trajectory.from_npz(traj_filename)\n # track the trajectory\n trajectories.append(traj)\n # track the trajectory's length\n trajectory_lengths.append(len(traj))\n # track the trajectory's length\n capacity += len(traj)\n # create the buffer\n _buffer = TrajectoryReplayBuffer(capacity=capacity, device=device)\n # add the trajectories to the buffer\n 
_buffer.trajectories = trajectories\n _buffer.trajectory_lengths = trajectory_lengths\n\n return _buffer" }, { "identifier": "StateActionRewardModel", "path": "reed/models/reward_model.py", "snippet": "class StateActionRewardModel:\n \"\"\"\n Reward model that operates over state action pairs\n \"\"\"\n def __init__(self,\n in_dim: t.Union[int, t.List[int]],\n ensemble_size: int = 3,\n hidden_dim: int = 256,\n hidden_layers: int = 3,\n final_activation: str = 'tanh',\n lr: float = 3e-4,\n optimizer: str = \"adam\",\n reward_train_batch: int = 128,\n size_segment: int = 1,\n device: torch.device = \"cuda\",\n multi_gpu: bool = False,\n image_observations: bool = False,\n image_encoder_architecture: str = \"pixl2r\",\n image_hidden_num_channels: int = 32,\n grayscale_images: bool = True):\n # the device the model will be put on\n self.device = device\n # whether data parallelism should be used during model training\n self.multi_gpu = multi_gpu\n # reward model configuration\n self.in_dim = in_dim\n self.hidden_dim = hidden_dim\n self.hidden_layers = hidden_layers\n self.ensemble_size = ensemble_size\n self.lr = lr\n self.optimizer_type = optimizer\n self.ensemble = []\n self.paramlst = []\n self.optimizer = None\n self.model = None\n self.final_activation = final_activation\n self.size_segment = size_segment\n\n self.image_observations = image_observations\n self.image_encoder_architecture = image_encoder_architecture\n self.image_hidden_num_channels = image_hidden_num_channels\n self.grayscale_images = grayscale_images\n\n # construct the reward ensemble\n self.construct_ensemble()\n\n # parameters used to train the reward model on the preference labelled trajectories\n self.train_batch_size = reward_train_batch\n self.CEloss = nn.CrossEntropyLoss()\n\n def eval(self):\n \"\"\"Set each reward model in the ensemble to evaluation mode\"\"\"\n self.ensemble = [net.eval() for net in self.ensemble]\n\n def train(self):\n \"\"\"Set each reward model in the ensemble to train mode\"\"\"\n self.ensemble = [net.train() for net in self.ensemble]\n\n def softXEnt_loss(self, predicted: torch.Tensor, target: torch.Tensor):\n logprobs = F.log_softmax(predicted, dim=1)\n return -(target * logprobs).sum() / predicted.shape[0]\n\n def construct_ensemble(self):\n for _ in range(self.ensemble_size):\n if self.image_observations:\n model = ImageStateActionNetwork(self.in_dim,\n out_size=1,\n hidden_dim=self.hidden_dim,\n hidden_depth=self.hidden_layers,\n final_activation=self.final_activation,\n image_encoder_architecture=self.image_encoder_architecture,\n image_hidden_num_channels=self.image_hidden_num_channels).float()\n else:\n model = StateActionNetwork(self.in_dim,\n out_size=1,\n hidden_dim=self.hidden_dim,\n hidden_depth=self.hidden_layers,\n final_activation=self.final_activation).float()\n print(model)\n # check if the model will be run with Data Parallelism\n if self.multi_gpu:\n print(f\"There are {torch.cuda.device_count()} GPU devices, so the reward ensemble WILL be trained \"\n f\"using nn.DataParallel\")\n self.ensemble.append(nn.DataParallel(model).to(self.device))\n else:\n print(f\"There are {torch.cuda.device_count()} GPU devices, so the reward ensemble will NOT be trained \"\n f\"using nn.DataParallel\")\n self.ensemble.append(model.to(self.device))\n # track all model parameters\n self.paramlst.extend(model.parameters())\n # create a single optimizer applied to all ensemble members\n if self.optimizer_type == \"adam\":\n self.optimizer = torch.optim.Adam(self.paramlst, lr=self.lr)\n elif 
self.optimizer_type == \"sgd\":\n self.optimizer = torch.optim.SGD(self.paramlst, lr=self.lr)\n else:\n raise NotImplementedError(f\"{self.optimizer_type} is not implemented as a reward optimizer and must be \"\n f\"one of 'adam' or 'sgd'.\")\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False, by_trajectory: bool = False):\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n # check if the images needs to be converted to grayscale\n if self.grayscale_images:\n obs = _to_grayscale(obs, batch_states=batch_states)\n if batch_states:\n # permute the input so that the channels are in the first dimension\n if by_trajectory:\n obs = np.transpose(obs, (0, 1, 4, 2, 3))\n else:\n print(obs.shape)\n obs = np.transpose(obs, (0, 3, 1, 2))\n return obs\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(obs, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return obs.reshape(1, *obs.shape)\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: bool = False, by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n # check if the images needs to be converted to grayscale\n if self.grayscale_images:\n obs = _to_grayscale(obs, batch_states=batch_sa)\n if batch_sa:\n obs_dim = obs.shape[1:]\n # we concatenate the actions along channel dimension of the image\n if by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n # now concatenate the two\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n # permute the input so that the channels are in the first dimension\n if by_trajectory:\n sa_t = np.transpose(sa_t, (0, 1, 4, 2, 3))\n else:\n sa_t = np.transpose(sa_t, (0, 3, 1, 2))\n return sa_t\n else:\n obs_dim = obs.shape\n # we concatenate the actions along channel dimension of the image\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n # now concatenate the two\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n # permute the input so that the channels are in the first dimension\n sa_t = np.transpose(sa_t, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return sa_t.reshape(1, *self.in_dim)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)\n\n def p_hat_member(self, x_1: np.ndarray, x_2: np.ndarray, member: int = -1):\n # softmaxing to get the probabilities according to eqn 1\n with torch.no_grad():\n # if we are using image observations, we need to collapse along the batch and time dimensions to 
push\n            # a forward pass through the network\n            # to compute the probabilities; we then need to re-construct the batch and time dimensions\n            if self.image_observations:\n                # we need to compute the probabilities in batches to avoid out of memory issues\n                # we use the train batch size as it should be an amount safe to put on the GPU's memory without causing\n                # issues\n                mb_size = self.train_batch_size\n                start_indx = 0\n                r_hat1 = None\n                r_hat2 = None\n                while start_indx < x_1.shape[0]:\n                    # check if there is a mb_size worth of trajectories to still be processed\n                    if start_indx + mb_size <= x_1.shape[0]:\n                        mb_x_1 = x_1[start_indx:start_indx + mb_size].reshape((-1, *x_1.shape[2:]))\n                        mb_x_2 = x_2[start_indx:start_indx + mb_size].reshape((-1, *x_2.shape[2:]))\n                    else:\n                        # process the leftover trajectories in a batch smaller than mb_size\n                        mb_x_1 = x_1[start_indx:].reshape((-1, *x_1.shape[2:]))\n                        mb_x_2 = x_2[start_indx:].reshape((-1, *x_2.shape[2:]))\n                    # score the mini-batch with the requested ensemble member and restore the (batch, time, 1) shape\n                    mb_rhat1 = self.r_hat_member(torch.from_numpy(mb_x_1).float().to(self.device),\n                                                 member=member).detach().cpu().reshape((-1, x_1.shape[1], 1))\n                    mb_rhat2 = self.r_hat_member(torch.from_numpy(mb_x_2).float().to(self.device),\n                                                 member=member).detach().cpu().reshape((-1, x_2.shape[1], 1))\n                    start_indx += mb_size\n\n                    # accumulate the rhats\n                    if r_hat1 is None:\n                        r_hat1 = mb_rhat1\n                        r_hat2 = mb_rhat2\n                    else:\n                        r_hat1 = torch.concat((r_hat1, mb_rhat1), dim=0)\n                        r_hat2 = torch.concat((r_hat2, mb_rhat2), dim=0)\n\n            else:\n                r_hat1 = self.r_hat_member(x_1, member=member).cpu()\n                r_hat2 = self.r_hat_member(x_2, member=member).cpu()\n            r_hat1 = r_hat1.sum(axis=1)\n            r_hat2 = r_hat2.sum(axis=1)\n            r_hat = torch.cat([r_hat1, r_hat2], axis=-1)\n        # taking 0 index for probability x_1 > x_2\n        return F.softmax(r_hat, dim=-1)[:, 0]\n\n    def p_hat_entropy(self, x_1: np.ndarray, x_2: np.ndarray, member: int = -1):\n        # softmaxing to get the probabilities according to eqn 1\n        with torch.no_grad():\n            r_hat1 = self.r_hat_member(x_1, member=member)\n            r_hat2 = self.r_hat_member(x_2, member=member)\n            r_hat1 = r_hat1.sum(axis=1)\n            r_hat2 = r_hat2.sum(axis=1)\n            r_hat = torch.cat([r_hat1, r_hat2], axis=-1)\n\n            ent = F.softmax(r_hat, dim=-1) * F.log_softmax(r_hat, dim=-1)\n            ent = ent.sum(axis=-1).abs()\n        return ent\n\n    def r_hat_member(self, x: torch.Tensor, member: int = -1) -> torch.Tensor:\n        # the network parameterizes r hat in eqn 1 from the paper\n        # return self.ensemble[member](torch.from_numpy(x).float().to(device))\n        return self.ensemble[member](x)\n\n    def r_hat(self, x: np.ndarray):\n        # they say they average the rewards from each member of the ensemble, but I think this only makes sense if the\n        # rewards are already normalized and I don't understand how the normalization should be happening right now :(\n        r_hats = []\n        for member in range(self.ensemble_size):\n            r_hats.append(self.r_hat_member(torch.from_numpy(x).float().to(self.device), member=member).detach().cpu().numpy())\n        r_hats = np.array(r_hats)\n        return np.mean(r_hats)\n\n    def r_hat_batch(self, x: np.ndarray):\n        # they say they average the rewards from each member of the ensemble, but I think this only makes sense if the rewards are already normalized\n        # but I don't understand how the normalization should be happening right now :(\n        r_hats = []\n        for member in range(self.ensemble_size):\n            r_hats.append(self.r_hat_member(torch.from_numpy(x).float().to(self.device), member=member).detach().cpu().numpy())\n        r_hats = np.array(r_hats)\n        return 
np.mean(r_hats, axis=0)\n\n def save(self, model_dir: str, env_id: str, step: int):\n \"\"\"\n Save the reward ensemble to disk\n\n Args:\n model_dir: path where the ensemble is to be saved\n env_id: the environment on which the ensemble has been trained\n step: the number of policy training steps\n \"\"\"\n for member in range(self.ensemble_size):\n torch.save(\n self.ensemble[member].state_dict(), f'{model_dir}/{env_id}_reward_model_{step}_{member}.pt'\n )\n\n def train_reward(self,\n preference_data_loader: PreferenceTripletEnsembleDataLoader,\n num_epoch: int):\n \"\"\"\n Train the reward model on the given preference dataset.\n\n Args:\n preference_data_loader: loads batches of preference triplets. Separated handles different preference\n dataset permutations for each member of the reward's ensemble.\n num_epoch: the number of training epochs to execute\n \"\"\"\n # track the accuracy and loss by ensemble member per epoch\n ensemble_accuracies = np.zeros((num_epoch, self.ensemble_size))\n ensemble_losses = np.zeros((num_epoch, self.ensemble_size))\n\n # train the reward model for the specified number of epochs\n for epoch in range(num_epoch):\n if epoch % 10 == 0:\n print(f\"Running preference training epoch {epoch} of {num_epoch}\")\n epoch_ensemble_losses = np.zeros(self.ensemble_size)\n epoch_ensemble_acc = np.zeros(self.ensemble_size)\n # train on each batch\n for batch_indx, batch in enumerate(preference_data_loader):\n # confirm there is either a single batch to be shared by all networks in the reward ensemble or\n # a batch per network in the ensemble\n assert len(batch) == 1 or len(batch) == self.ensemble_size\n # we need to zero out the gradients before we begin to process this batch\n self.optimizer.zero_grad()\n # we will need to accumulate the loss across the ensemble members\n batch_loss = 0.0\n for member_indx, preference_triplet_batch in enumerate(batch):\n # the predicted reward per transition in each trajectory\n # check if we need to collapse the batch and time dimensions into one and then reconstruct the two\n if self.image_observations:\n # get the rewards for each transition in the trajectories one\n traj_one_shape = preference_triplet_batch.trajectories_one.shape\n formatted_trajectories_one = preference_triplet_batch.trajectories_one.reshape(\n (-1, *traj_one_shape[2:]))\n r_hat1 = self.r_hat_member(formatted_trajectories_one,\n member=member_indx).reshape((traj_one_shape[0],\n traj_one_shape[1], 1))\n # get the rewards for each transition in the trajectories two\n traj_two_shape = preference_triplet_batch.trajectories_two.shape\n formatted_trajectories_two = preference_triplet_batch.trajectories_two.reshape(\n (-1, *traj_two_shape[2:]))\n r_hat2 = self.r_hat_member(formatted_trajectories_two,\n member=member_indx).reshape((traj_two_shape[0],\n traj_two_shape[1], 1))\n else:\n r_hat1 = self.r_hat_member(preference_triplet_batch.trajectories_one,\n member=member_indx)\n r_hat2 = self.r_hat_member(preference_triplet_batch.trajectories_two,\n member=member_indx)\n # compute the return per trajectory\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n\n r_hat = torch.cat([r_hat1, r_hat2], dim=-1)\n\n # compute the ensemble member's loss\n curr_loss = self.CEloss(r_hat, preference_triplet_batch.preference_labels.squeeze())\n # add the loss from the ensemble member to the batch loss\n batch_loss += curr_loss\n # track the loss for this ensemble member\n epoch_ensemble_losses[member_indx] += curr_loss.item()\n\n # compute the accuracy of the ensemble member's 
predictions\n                    _, predicted = torch.max(r_hat.data, 1)\n                    correct = (predicted == preference_triplet_batch.preference_labels.squeeze()).sum().item()\n                    epoch_ensemble_acc[member_indx] += correct\n                # compute the gradients\n                batch_loss.backward()\n                # apply the gradients to the model\n                self.optimizer.step()\n            # compute the ensemble accuracy for this epoch\n            ensemble_accuracies[epoch] = epoch_ensemble_acc / preference_data_loader.dataset_length()\n            # compute the mean ensemble loss for this epoch\n            ensemble_losses[epoch] = epoch_ensemble_losses / preference_data_loader.dataset_length()\n\n            if epoch % 10 == 0:\n                print(f\"Epoch {epoch} mean accuracy = {np.mean(ensemble_accuracies[:epoch + 1]):.2f}\")\n\n            # check the current mean accuracy, if it is greater than 0.97 then terminate training\n            if np.mean(ensemble_accuracies[epoch]) >= 0.97:\n                print(f\"Epoch accuracy {np.mean(ensemble_accuracies[epoch]):.2f} \"\n                      f\"after {epoch} epochs triggered early stopping.\")\n                return ensemble_accuracies[:epoch + 1], ensemble_losses[:epoch + 1]\n\n        print(f\"Epoch {num_epoch} mean accuracy = {np.mean(ensemble_accuracies):.2f}\")\n\n        return ensemble_accuracies, ensemble_losses" }, { "identifier": "PreferenceDataset", "path": "reed/data/preference_dataset.py", "snippet": "class PreferenceDataset:\n    def __init__(self, observation_dim: t.Union[t.Tuple, int], action_dim: t.Union[t.Tuple, int], capacity: int,\n                 size_segment: int, out_path: Path, image_observations: bool, grayscale_images: bool,\n                 collect_image_pref_dataset: bool, state_action_formatter: PreProcessInference,\n                 teacher_beta: float = -1, teacher_gamma: float = 1,\n                 teacher_eps_mistake: float = 0, teacher_eps_skip: float = 0, teacher_eps_equal: float = 0):\n        \"\"\"\n        Args:\n            observation_dim: the dimensionality of the observations\n            action_dim: the dimensionality of the actions\n            capacity: the maximum number of trajectory pairs to include in the dataset\n            size_segment: the length of the trajectory segments\n            out_path: the location where the preference dataset will be written to disk during training\n            image_observations: whether the observations given to the reward model are images\n            grayscale_images: whether the image observations should be converted to grayscale instead of color\n            collect_image_pref_dataset: whether to collect the image preference dataset separate from the observations.\n                                        Should NOT be set to true if the observations are images.\n            state_action_formatter: function that maps states and actions to a single input\n            teacher_beta\n            teacher_gamma: used to determine how much influence each reward has on the preference label based on\n                           order within the trajectory. 
Used to compute the return\n teacher_eps_mistake: the frequency with which the teacher assigns an incorrect label\n teacher_eps_skip: the frequency with which the teacher does not assign a label\n teacher_eps_equal: the maximum difference between trajectory returns for the two trajectories to be labelled\n as equally preferred\n \"\"\"\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n self.capacity = capacity\n self.size_segment = size_segment\n self.out_path = out_path\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n # whether to collect the preference dataset as images\n # only needs to be set to True if we are not learning the reward function from images\n # if we are learning the reward function from images then we have an image dataset\n self.collect_image_pref_dataset = collect_image_pref_dataset\n\n # formats the state-action pairs into a single input to the reward model\n self.state_action_formatter = state_action_formatter\n\n # track where each preference triplet is written to disk\n self._preference_triplet_tracker: t.List[Path] = []\n\n self.buffer_index = 0\n self.buffer_full = False\n\n # create the preference labeller\n self._preference_labeller = _PreferenceLabeller(teacher_beta=teacher_beta, teacher_gamma=teacher_gamma,\n teacher_eps_mistake=teacher_eps_mistake,\n teacher_eps_skip=teacher_eps_skip,\n teacher_eps_equal=teacher_eps_equal)\n\n # make sure the outpath where the trajectories will be written exist\n self.out_path.mkdir(parents=True, exist_ok=True)\n\n def __len__(self):\n return len(self._preference_triplet_tracker)\n\n def __getitem__(self, item: int) -> PREFERENCE_TRIPLET:\n \"\"\"\n Load and return the preference triplet at the specified index in the buffer\n\n Args:\n item: index of the triplet in the buffer\n Returns:\n trajectory one\n trajectory two\n preference label\n \"\"\"\n # get the location of the specified preference triplet and load it into memory\n npz_archive = np.load(self._preference_triplet_tracker[item].as_posix())\n\n # grab the trajectories and preference labels\n trajectory_one = npz_archive[\"trajectory_one\"]\n trajectory_two = npz_archive[\"trajectory_two\"]\n preference_label = npz_archive[\"preference_label\"]\n\n return trajectory_one, trajectory_two, preference_label\n\n def get_batch(self, indices: t.List[int]) -> PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Load and return the batch of preference triplets at the given indices in the buffer\n\n Args:\n indices: the buffer indices of the preference triplets to load into memory\n Returns:\n batch of trajectories one\n batch of trajectories two\n batch of preference labels\n \"\"\"\n # accumulate the trajectory pairs and preference labels\n trajectories_one = []\n trajectories_two = []\n preference_labels = []\n # grab each preference triplet\n for index in indices:\n trajectory_one, trajectory_two, preference_label = self[index]\n trajectories_one.append(np.expand_dims(trajectory_one, axis=0))\n trajectories_two.append(np.expand_dims(trajectory_two, axis=0))\n preference_labels.append(preference_label)\n\n return (np.concatenate(trajectories_one, axis=0), np.concatenate(trajectories_two, axis=0),\n np.concatenate(preference_labels, axis=0))\n\n def _sample_trajectory_segments_uniform(self,\n experience_buffer: TrajectoryReplayBuffer,\n trajectory_count: int,\n mini_batch_size: int) -> t.Tuple[np.ndarray, np.ndarray, t.Optional[np.ndarray]]:\n \"\"\"\n Uniformly sample trajectories and then uniformly sample a segment of the 
trajectory.\n\n Format and track the state-action pairs from each trajectory segment\n Format and track rewards from each trajectory segment\n\n Combine the formatted state-action pairs and the rewards across trajectory segments\n\n Args:\n experience_buffer: the replay buffer from which trajectory pairs will be drawn\n trajectory_count: the number of trajectories to be sampled from\n mini_batch_size: the number of trajectories to sample\n\n Returns:\n the formatted state-action pairs from random trajectory segments from trajectories\n the rewards from each random trajectory segment\n (optionally) the image observations from each random trajectory segment - only returned when the flag to\n collect image observations in the preference dataset is true and image observations are not\n used to train the reward model\n \"\"\"\n # select the trajectories to be included in this batch of trajectory segments\n trajectory_indices = np.random.choice(trajectory_count, size=mini_batch_size, replace=True)\n\n # accumulate the formatted state-action pairs and rewards from each trajectory segment\n state_action_pairs = []\n rewards = []\n # optionally accumulate image observations\n image_observations = ([] if self.collect_image_pref_dataset and not self.image_observations else None)\n # extract each trajectory and randomly sample a segment\n for traj_index in trajectory_indices:\n # grab the trajectory\n trajectory = experience_buffer.trajectories[traj_index]\n # select a random segment from the trajectory\n traj_segment = trajectory.random_segment(length=self.size_segment)\n # track the rewards associated with the random segment\n rewards.append(np.expand_dims(traj_segment.env_rewards, axis=0))\n # format the state and action based on whether image observations are being used\n if self.image_observations:\n formatted_pair = self.state_action_formatter.format_state_action(\n traj_segment.initial_image_observations,\n traj_segment.actions,\n batch_sa=True)\n else:\n formatted_pair = self.state_action_formatter.format_state_action(\n traj_segment.initial_observations,\n traj_segment.actions,\n batch_sa=True)\n if self.collect_image_pref_dataset:\n image_observations.append(np.expand_dims(traj_segment.initial_image_observations, axis=0))\n # add a dimension in the front so we can concatenate later and the track\n state_action_pairs.append(np.expand_dims(formatted_pair, axis=0))\n return (np.concatenate(state_action_pairs, axis=0),\n np.concatenate(rewards, axis=0),\n (np.concatenate(image_observations, axis=0) if image_observations is not None else None))\n\n @staticmethod\n def get_rank_probability(trajectories_one: np.ndarray, trajectories_two: np.ndarray,\n reward_model: torch.nn.Module):\n \"\"\"\n Compute the preference-prediction disagreement between the ensemble members for each trajectory pair\n\n Args:\n trajectories_one: the trajectories one to be evaluated for ensemble disagreement\n trajectories_two: the trajectories two to be evaluated for ensemble disagreement\n reward_model: the ensemble of networks that will be used to compute disagreement\n \"\"\"\n\n # get probability x_1 > x_2\n probs = []\n for member in range(len(reward_model.ensemble)):\n probs.append(reward_model.p_hat_member(trajectories_one,\n trajectories_two,\n member=member).cpu().numpy())\n probs = np.array(probs)\n\n return np.mean(probs, axis=0), np.std(probs, axis=0)\n\n def get_queries(self, experience_buffer: TrajectoryReplayBuffer, mb_size=20):\n len_traj, max_len = experience_buffer.trajectory_lengths[0], 
experience_buffer.trajectory_count\n\n # if len(self.experience_buffer.trajectory_lengths[0][-1]) < len_traj:\n # check that the last trajectory contains at least as many transitions as the target segment length\n # we check the last trajectory, because it may be incomplete\n # this is a carry over from the original code. The authors had an assumption that all \"completed\" trajectories\n # will be at least as long as the target segment length\n if experience_buffer.trajectory_lengths[-1] < self.size_segment:\n max_len = max_len - 1\n\n # grab each trajectory, select a random segment from each, format the state-action pairs, and concatenate\n # along the batch dimension\n state_action_pair_traj_one, r_t_1, images_traj_one = self._sample_trajectory_segments_uniform(\n experience_buffer=experience_buffer,\n trajectory_count=max_len,\n mini_batch_size=mb_size)\n state_action_pair_traj_two, r_t_2, images_traj_two = self._sample_trajectory_segments_uniform(\n experience_buffer=experience_buffer,\n trajectory_count=max_len,\n mini_batch_size=mb_size)\n # confirm the image-specific variables are only populated when they should be\n if not self.collect_image_pref_dataset and self.image_observations:\n assert images_traj_one is None and images_traj_two is None\n return state_action_pair_traj_one, state_action_pair_traj_two, r_t_1, r_t_2, images_traj_one, images_traj_two\n\n def put_queries(self, state_action_pair_traj_one: np.ndarray, state_action_pair_traj_two: np.ndarray,\n preference_labels: np.ndarray,\n images_traj_one: t.Optional[np.ndarray] = None, images_traj_two: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n state_action_pair_traj_one: the state-action pairs that make up the trajectories one in the queries\n state_action_pair_traj_two: the state-action pairs that make up the trajectories two in the queries\n preference_labels: the preference labels for each pair of trajectories\n images_traj_one: the images for trajectories one\n images_traj_two: the images for trajectories two\n \"\"\"\n # get the number of triplets to be stored\n total_sample = state_action_pair_traj_one.shape[0]\n # write each preference_triplet to disk\n for batch_indx in range(total_sample):\n # get the index of the triplet in the \"buffer\"\n preference_triplet_index = self.buffer_index + batch_indx\n # check if we need to wrap the buffer\n if preference_triplet_index >= self.capacity:\n preference_triplet_index -= self.capacity\n elif not self.buffer_full:\n # this is a previously unseen preference triplet buffer index, so we need to track the triplet location\n self._preference_triplet_tracker.append(self.out_path / f\"preference_triplet_{preference_triplet_index}.npz\")\n # save the preference triplet\n np.savez((self.out_path / f\"preference_triplet_{preference_triplet_index}.npz\").as_posix(),\n trajectory_one=state_action_pair_traj_one[batch_indx],\n trajectory_two=state_action_pair_traj_two[batch_indx],\n preference_label=preference_labels[batch_indx],\n image_trajectory_one=(\n None if images_traj_one is None else images_traj_one[batch_indx]),\n image_trajectory_two=(\n None if images_traj_two is None else images_traj_two[batch_indx]))\n # set the new buffer index\n next_index = self.buffer_index + total_sample\n # check if the buffer has wrapped\n if next_index >= self.capacity:\n self.buffer_full = True\n # wrap the buffer index\n self.buffer_index = next_index - self.capacity\n else:\n self.buffer_index = next_index\n\n def uniform_sampling(self, experience_buffer: TrajectoryReplayBuffer, mb_size: 
int) -> int:\n \"\"\"\n Grow the preference dataset with preference triplets uniformly sampled from the experience buffer\n\n Args:\n experience_buffer: the replay buffer from which to sample trajectory pairs\n mb_size: target number of preference triplets to add to the preference dataset. Fewer than the target may\n be added depending on the whether labeller skips labelling some trajectories.\n Returns:\n number of preference triplets added to the dataset\n \"\"\"\n # get queries\n sa_t_1, sa_t_2, r_t_1, r_t_2, img_sa_t_1, img_sa_t_2 = self.get_queries(experience_buffer=experience_buffer,\n mb_size=mb_size)\n\n # get labels\n sa_t_1, sa_t_2, r_t_1, r_t_2, labels = self._preference_labeller.get_label(sa_t_1, sa_t_2, r_t_1, r_t_2)\n if len(labels) > 0:\n self.put_queries(sa_t_1, sa_t_2, labels, img_sa_t_1, img_sa_t_2)\n\n return len(labels)\n\n # TODO: refactor to break the circular import that would need to happen in order to specify that reward_model here\n # should be BPref.reward_model.RewardModel\n def disagreement_sampling(self, experience_buffer: TrajectoryReplayBuffer, mb_size: int, large_batch: int,\n reward_model: torch.nn.Module) -> int:\n \"\"\"\n Grow the preference dataset with preference triplets from the experience buffer that the reward ensemble\n disagrees about\n\n Args:\n experience_buffer: the replay buffer from which to sample trajectory pairs\n mb_size: target number of preference triplets to add to the preference dataset. Fewer than the target may\n be added depending on the whether labeller skips labelling some trajectories.\n large_batch: scales up the number of triplets to add to the preference dataset to uniformly select a large\n number of trajectory pairs, which are then pruned based on which ones the reward ensemble\n has the most disagreement over\n reward_model: the ensemble of reward networks that will be used to assess disagreement.\n Should be BPref.reward_model.RewardModel, but cannot import and reference from here right now\n as it would lead to circular imports\n Returns:\n number of preference triplets added to the dataset\n \"\"\"\n # get queries\n sa_t_1, sa_t_2, r_t_1, r_t_2, img_sa_t_1, img_sa_t_2 = self.get_queries(\n experience_buffer=experience_buffer, mb_size=mb_size * large_batch)\n\n # get final queries based on ensemble member disagreement\n _, disagree = self.get_rank_probability(sa_t_1, sa_t_2, reward_model=reward_model)\n top_k_index = (-disagree).argsort()[:mb_size]\n r_t_1, sa_t_1 = r_t_1[top_k_index], sa_t_1[top_k_index]\n r_t_2, sa_t_2 = r_t_2[top_k_index], sa_t_2[top_k_index]\n if img_sa_t_1 is not None:\n img_sa_t_1 = img_sa_t_1[top_k_index]\n img_sa_t_2 = img_sa_t_2[top_k_index]\n\n # get labels\n sa_t_1, sa_t_2, r_t_1, r_t_2, labels = self._preference_labeller.get_label(\n sa_t_1, sa_t_2, r_t_1, r_t_2)\n if len(labels) > 0:\n self.put_queries(sa_t_1, sa_t_2, labels, img_sa_t_1, img_sa_t_2)\n\n return len(labels)\n\n def set_teacher_thres_skip(self, new_margin):\n self._preference_labeller.teacher_thres_skip = new_margin * self._preference_labeller.teacher_eps_skip\n\n def set_teacher_thres_equal(self, new_margin):\n self._preference_labeller.teacher_eps_equal = new_margin * self._preference_labeller.teacher_eps_equal\n\n def save(self, dataset_dir: Path, env_id: str, step: int):\n \"\"\"\n Saves the preference dataset as a zip archive and the labeller configuration as a yaml to the specified location\n\n Args:\n dataset_dir: path where the dataset is to be saved\n env_id: the environment/task within which the data was generated\n 
step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(dataset_dir / f\"{env_id}_preference_dataset_{step}.zip\", \"w\")\n # the configuration for the online preference dataset\n config = {\"teacher_params\": {\"teacher_beta\": self._preference_labeller.teacher_beta,\n \"teacher_gamma\": self._preference_labeller.teacher_gamma,\n \"teacher_eps_mistake\": self._preference_labeller.teacher_eps_mistake,\n \"teacher_eps_equal\": self._preference_labeller.teacher_eps_equal,\n \"teacher_eps_skip\": self._preference_labeller.teacher_eps_skip,\n \"teacher_thres_skip\": self._preference_labeller.teacher_thres_skip,\n \"teacher_thres_equal\": self._preference_labeller.teacher_thres_equal,\n \"label_margin\": self._preference_labeller.label_margin,\n \"label_target\": self._preference_labeller.label_target}}\n with open((dataset_dir / f\"preference_dataset_config.yaml\").as_posix(), \"w+\") as f:\n yaml.dump(config, f)\n # write the labeller config to the preference dataset's zip archive\n zip_obj.write(dataset_dir / f\"preference_dataset_config.yaml\")\n\n # add each preference triplet to the zip archive\n for pref_triplet_path in self._preference_triplet_tracker:\n zip_obj.write(pref_triplet_path)\n # move the file from it temp location to the artifact directory\n file_dest_path = dataset_dir / pref_triplet_path.name\n shutil.move(pref_triplet_path, file_dest_path)\n # close the Zip File\n zip_obj.close()" }, { "identifier": "PreferenceTripletEnsembleDataLoader", "path": "reed/data/preference_data_loader.py", "snippet": "class PreferenceTripletEnsembleDataLoader:\n \"\"\"\n Handles loading and generating batches of preference triplets.\n\n The special logic needed is to handle different batch orderings for different networks in the reward ensemble\n \"\"\"\n def __init__(self, dataset: PreferenceDataset, ensemble_size: int,\n batch_size: int = 64, num_workers: int = 0, shuffle: bool = True, device: torch.device = \"cuda\"):\n \"\"\"\n Args:\n\n \"\"\"\n # create a data loader per ensemble network\n self.loader_ensemble = [DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n for _ in range(ensemble_size)]\n\n self.device = device\n\n def _format_batch(self, batch: UNFORMATTED_PREFERENCE_TRIPLET_BATCH) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Format the preference batch so that the tensors are longs and on the correct device\n \"\"\"\n return [PreferenceTripletBatch(trajectories_one=member[0].float().to(self.device),\n trajectories_two=member[1].float().to(self.device),\n preference_labels=member[2].long().to(self.device))\n for member in batch]\n\n def dataset_length(self) -> int:\n return len(self.loader_ensemble[0].dataset)\n\n def __iter__(self) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Iterate through the preference triplet data loaders and return the batch per ensemble member\n\n Returns:\n list of PreferenceTripletBatch\n \"\"\"\n # set up each loader as an iterator\n iter_loader_ensemble = [iter(loader) for loader in self.loader_ensemble]\n # for each data loader grab the next batch until there are no more batches to grab\n while True:\n # check if there is a next batch to return\n try:\n yield self._format_batch([next(dataloader_iterator) for dataloader_iterator in iter_loader_ensemble])\n except StopIteration:\n break" }, { "identifier": "PreProcessInference", "path": "reed/data/preprocess_images.py", "snippet": "class PreProcessInference:\n \"\"\"\n 
Preprocess the data for inference by the reward, SSC, and SFC models\n \"\"\"\n def __init__(self,\n image_observations: bool = False,\n grayscale_images: bool = True,\n normalize_images: bool = True,\n environment_id: str = \"dmc\"):\n \"\"\"\n Args:\n image_observations: whether the observations are images\n grayscale_images: whether images observations should be in grayscale\n normalize_images: whether the image observations should be normalized\n environment_id: the environment from which the data is coming\n \"\"\"\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n self.normalize_images = normalize_images\n self.environment_id = environment_id\n\n @staticmethod\n def _channel_first_to_last(observation: np.ndarray,\n batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the first dimension to the last dimension\n \"\"\"\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 3, 4, 2))\n elif batch_states:\n return np.transpose(observation, (0, 2, 3, 1))\n else:\n return np.transpose(observation, (1, 2, 0))\n\n @staticmethod\n def _channel_last_to_first(observation: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the last dimension to the first dimension\n Args:\n observation: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the image with the channel dimension moved from first to last\n \"\"\"\n # permute the input so that the channels are in the first dimension of the images\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 4, 2, 3))\n elif batch_states:\n return np.transpose(observation, (0, 3, 1, 2))\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(observation, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return np.expand_dims(obs, axis=0)\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False, channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_states,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n # move the channel dimension from first to last\n return self._channel_last_to_first(observation=obs, batch_states=batch_states, by_trajectory=by_trajectory)\n\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: 
bool = False, by_trajectory: bool = False,\n channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_sa,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n\n # get the dimensions of the image\n obs_dim = obs.shape[-3:]\n assert len(obs_dim) == 3\n # add the actions to the image channels and permute the input so that the channels are in the first\n # dimension of the images\n if batch_sa and by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n elif batch_sa:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n return self._channel_last_to_first(sa_t, batch_states=batch_sa, by_trajectory=by_trajectory)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)" } ]
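The format_state_action method above fuses an image observation with an action by tiling the action vector across the spatial dimensions and concatenating it as extra channels. A minimal standalone sketch of that tiling step for a single (non-batched) pair, with hypothetical shapes and with the grayscale/normalization steps omitted:

import numpy as np

def tile_action_onto_image(obs: np.ndarray, act: np.ndarray) -> np.ndarray:
    """obs: (H, W, C) channel-last image; act: (A,) action vector.
    Returns a (1, C + A, H, W) channel-first state-action array."""
    height, width, _ = obs.shape
    # broadcast the action to every pixel location: (H, W, A)
    repeated_actions = np.tile(act.reshape((1, 1, -1)), (height, width, 1))
    # stack the action channels behind the image channels: (H, W, C + A)
    sa = np.concatenate((obs, repeated_actions), axis=-1)
    # channel-last -> channel-first, plus a leading batch axis for the buffer
    return np.expand_dims(np.transpose(sa, (2, 0, 1)), axis=0)

sa_t = tile_action_onto_image(np.zeros((84, 84, 3), dtype=np.float32),
                              np.array([0.1, -0.2], dtype=np.float32))
assert sa_t.shape == (1, 5, 84, 84)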
import typing as t import time import numpy as np import torch import hydra from pathlib import Path from omegaconf import dictconfig, OmegaConf from BPref import utils from BPref.logger import Logger from BPref.replay_buffer import TrajectoryReplayBuffer from collections import deque from reed.models.reward_model import StateActionRewardModel from reed.data.preference_dataset import PreferenceDataset from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader from reed.data.preprocess_images import PreProcessInference
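The PreferenceDataset.put_queries method shown earlier treats the on-disk triplet store as a ring buffer: an index past capacity is reduced by capacity and overwrites the oldest triplet. That single-subtraction form assumes at most one wrap per batch; the modulo form below is equivalent whenever the batch size does not exceed the capacity. A minimal sketch of the wrap-around arithmetic (the helper name is hypothetical):

def ring_indices(start, count, capacity):
    """Yield buffer slots for `count` new triplets starting at `start`, wrapping at `capacity`."""
    for offset in range(count):
        yield (start + offset) % capacity

# e.g. with capacity 5, writing 4 triplets starting at slot 3 reuses slots 3, 4, 0, 1
assert list(ring_indices(3, 4, 5)) == [3, 4, 0, 1]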
21,223
Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs def construct_reward_ensemble(self) -> StateActionRewardModel: """ Create the reward ensemble as specified in the experiment config. """ return StateActionRewardModel( in_dim=self.reward_in_dim, ensemble_size=self.experiment_config.ensemble_size, hidden_dim=self.experiment_config.reward_hidden_embed_dim, hidden_layers=self.experiment_config.reward_num_hidden_layers, final_activation=self.experiment_config.activation, lr=self.experiment_config.reward_lr, reward_train_batch=self.experiment_config.reward_train_batch, size_segment=self.experiment_config.segment_size, device=self.device, multi_gpu=self.multi_gpu, image_observations=self.experiment_config.reward_from_image_observations, image_encoder_architecture=self.experiment_config.image_encoder_architecture, image_hidden_num_channels=self.experiment_config.image_hidden_num_channels, grayscale_images=self.experiment_config.grayscale_images ) def evaluate(self): average_episode_reward = 0 average_true_episode_reward = 0 success_rate = 0 for episode in range(self.experiment_config.num_eval_episodes): obs = self.env.reset() self.agent.reset() done = False episode_reward = 0 true_episode_reward = 0 if self.log_success: episode_success = 0 while not done: with utils.eval_mode(self.agent): action = self.agent.act(obs, sample=False) obs, reward, done, extra = self.env.step(action) episode_reward += reward true_episode_reward += reward if self.log_success: episode_success = max(episode_success, extra['success']) average_episode_reward += episode_reward average_true_episode_reward += true_episode_reward if self.log_success: success_rate += episode_success average_episode_reward /= self.experiment_config.num_eval_episodes average_true_episode_reward /= self.experiment_config.num_eval_episodes if self.log_success: success_rate /= self.experiment_config.num_eval_episodes success_rate *= 100.0 self.logger.log('eval/episode_reward', 
average_episode_reward, self.step) self.logger.log('eval/true_episode_reward', average_true_episode_reward, self.step) if self.log_success: self.logger.log('eval/success_rate', success_rate, self.step) self.logger.log('train/true_episode_success', success_rate, self.step) self.logger.dump(self.step) def train_reward_on_preferences(self) -> t.Optional[float]: """ Update the reward model on the current preference dataset Returns: train accuracy on the current reward model update round, if the preference dataset contains samples """ # create the data loader that will be used to train the reward model
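The cropped code stops just before the reward update itself. The get_rank_probability method above evaluates p_hat_member, which points at a Bradley-Terry style objective: each segment's predicted per-step rewards are summed into a return, and the two returns act as logits against the preference label. A minimal sketch of that loss, assuming per-step reward predictions of shape (batch, segment_len, 1); this is an illustration consistent with the snippets above, not the repository's exact update:

import torch
import torch.nn.functional as F

def preference_loss(rewards_one: torch.Tensor, rewards_two: torch.Tensor,
                    labels: torch.Tensor) -> torch.Tensor:
    """rewards_*: (batch, segment_len, 1) predicted per-step rewards;
    labels: (batch,) long tensor, 0 if segment one is preferred, 1 otherwise."""
    # sum per-step rewards into a predicted return for each segment
    returns_one = rewards_one.sum(dim=1)  # (batch, 1)
    returns_two = rewards_two.sum(dim=1)  # (batch, 1)
    # the pair of returns acts as logits over the two preference outcomes
    logits = torch.cat([returns_one, returns_two], dim=-1)  # (batch, 2)
    return F.cross_entropy(logits, labels)

loss = preference_loss(torch.randn(8, 50, 1), torch.randn(8, 50, 1),
                       torch.randint(0, 2, (8,)))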
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
class PEBBLE:
    """
    Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021)
    """
    def __init__(self, experiment_config: dictconfig.DictConfig):
        """
        Args:
            experiment_config: contains the configuration for the experiment to be run. Access like a dictionary
        """
        # track the experimental configuration
        self.experiment_config = experiment_config

        # create the logger to track policy learning progress
        self.logger = Logger(
            self.experiment_config.out_dir,
            save_tb=self.experiment_config.log_save_tb,
            log_frequency=self.experiment_config.log_frequency,
            agent=self.experiment_config.agent.name)

        # used to track where we are in training
        # total amount of feedback the reward model has solicited
        self.total_feedback = 0
        # total amount of feedback given to the reward model
        self.labeled_feedback = 0
        # policy train step
        self.step = 0

        # we need to set the random seed for replication purposes
        utils.set_seed_everywhere(self.experiment_config.seed)

        # the device on which models will be trained
        self.device = torch.device(self.experiment_config.device)
        # flag to make sure we are handling multi-gpu training where we need to
        self.multi_gpu = torch.cuda.device_count() > 1
        print("----------------------------------------")
        print("----------------------------------------")
        print("----------------------------------------")
        print("----------------------------------------")
        print(f"There are {torch.cuda.device_count()} GPU(s), so models will be trained with torch.nn.DataParallel when more than one is available.")
        print("----------------------------------------")
        print("----------------------------------------")
        print("----------------------------------------")
        print("----------------------------------------")
        # make the environment
        if 'metaworld' in self.experiment_config.env:
            self.env = utils.make_metaworld_env(self.experiment_config)
            # we are evaluating a domain where we need to log whether an agent has reached a goal state
            self.log_success = True
        else:
            self.env = utils.make_env(self.experiment_config)
            # we are not evaluating a domain where we need to log whether an agent has reached a goal state
            self.log_success = False
        print('----------------------')
        print('----------------------')
        print('----------------------')
        print('----------------------')
        print("observation space ", self.env.observation_space.shape[0])
        print("action space ", self.env.action_space.shape[0])
        print('----------------------')
        print('----------------------')
        print('----------------------')
        print('----------------------')
        # we need to set the policy's observation and action space
        self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0]
        self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0]
        self.experiment_config.agent.params.action_range = [
            float(self.env.action_space.low.min()),
            float(self.env.action_space.high.max())
        ]
        # create the agent specified in the configuration
        self.agent = hydra.utils.instantiate(self.experiment_config.agent)

        # the class that will format the observations and observation-action pairs for consumption by the reward model
        self._reward_input_preprocessor = PreProcessInference(
            image_observations=self.experiment_config.reward_from_image_observations,
            grayscale_images=self.experiment_config.grayscale_images,
            normalize_images=self.experiment_config.normalized_images)

        # determine the reward's observation space
        # if the reward is trained on images then the reward's 
observation space differs from the policy's, which is # trained on the state space self._observation_dimensionality = self._determine_observation_dimensions() self._reward_observation_dimensionality = self._determine_reward_observation_dimensions() # create the agent's replay buffer setting if image observations will need to be tracked self.replay_buffer = TrajectoryReplayBuffer( int(self.experiment_config.replay_buffer_capacity), self.device, image_observations=(self._observation_dimensionality if (self.experiment_config.reward_from_image_observations or self.experiment_config.save_image_observations) else None) ) # determine the dimensionality of the input to the reward function self.reward_in_dim = self._determine_reward_input_dimensions( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0]) # instantiating the reward model self.reward_model = self.construct_reward_ensemble() # create the preference dataset that will solicit and hold labelled preference triplets self.preference_dataset = PreferenceDataset( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0], capacity=self.experiment_config.preference_dataset_capacity, size_segment=self.experiment_config.segment_size, out_path=Path("/tmp/preference_dataset/"), image_observations=self.experiment_config.reward_from_image_observations, state_action_formatter=self._reward_input_preprocessor, grayscale_images=self.experiment_config.grayscale_images, collect_image_pref_dataset=self.experiment_config.save_image_observations, teacher_beta=self.experiment_config.teacher_beta, teacher_gamma=self.experiment_config.teacher_gamma, teacher_eps_mistake=self.experiment_config.teacher_eps_mistake, teacher_eps_skip=self.experiment_config.teacher_eps_skip, teacher_eps_equal=self.experiment_config.teacher_eps_equal ) # save the experimental configuration with open(Path(self.experiment_config.out_dir) / "experiment_config.yaml", "w+") as f: OmegaConf.save(config=self.experiment_config, f=f) def _determine_reward_input_dimensions(self, observation_dim: t.Union[int, np.ndarray], action_dim: int) -> t.Union[int, t.Sequence]: """ Determine the dimensionality of the inputs to the reward model Args: observation_dim: the dimensionality of agent observations. If the observation is an image, the dimensionality should have the following order: (num_channels, height, width) action_dim: the dimensionality of agent actions Returns: the dimensionality of the reward model's inputs """ # compute the dimensions of the input to the reward function if not self.experiment_config.reward_from_image_observations: return observation_dim + action_dim else: # we need to concatenate the actions to last dimension of the image # the input to the reward net also needs to have the channels first # the image dimensions are given to us a (height, width, channels) sample_shape = list(observation_dim) if self.experiment_config.grayscale_images: num_channels = action_dim + 1 else: num_channels = sample_shape[0] + action_dim # update the number of channels sample_shape[0] = num_channels # the dimensions of the input to the reward model return sample_shape def _determine_reward_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. 
If so the reward input shape needs to be set accordingly Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") formatted_image_observation = self._reward_input_preprocessor.format_state(img_obs).squeeze(axis=0) observation_space = formatted_image_observation.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _determine_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. If so the replay buffer needs to be set up to accumulate the image observations Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs def construct_reward_ensemble(self) -> StateActionRewardModel: """ Create the reward ensemble as specified in the experiment config. 
""" return StateActionRewardModel( in_dim=self.reward_in_dim, ensemble_size=self.experiment_config.ensemble_size, hidden_dim=self.experiment_config.reward_hidden_embed_dim, hidden_layers=self.experiment_config.reward_num_hidden_layers, final_activation=self.experiment_config.activation, lr=self.experiment_config.reward_lr, reward_train_batch=self.experiment_config.reward_train_batch, size_segment=self.experiment_config.segment_size, device=self.device, multi_gpu=self.multi_gpu, image_observations=self.experiment_config.reward_from_image_observations, image_encoder_architecture=self.experiment_config.image_encoder_architecture, image_hidden_num_channels=self.experiment_config.image_hidden_num_channels, grayscale_images=self.experiment_config.grayscale_images ) def evaluate(self): average_episode_reward = 0 average_true_episode_reward = 0 success_rate = 0 for episode in range(self.experiment_config.num_eval_episodes): obs = self.env.reset() self.agent.reset() done = False episode_reward = 0 true_episode_reward = 0 if self.log_success: episode_success = 0 while not done: with utils.eval_mode(self.agent): action = self.agent.act(obs, sample=False) obs, reward, done, extra = self.env.step(action) episode_reward += reward true_episode_reward += reward if self.log_success: episode_success = max(episode_success, extra['success']) average_episode_reward += episode_reward average_true_episode_reward += true_episode_reward if self.log_success: success_rate += episode_success average_episode_reward /= self.experiment_config.num_eval_episodes average_true_episode_reward /= self.experiment_config.num_eval_episodes if self.log_success: success_rate /= self.experiment_config.num_eval_episodes success_rate *= 100.0 self.logger.log('eval/episode_reward', average_episode_reward, self.step) self.logger.log('eval/true_episode_reward', average_true_episode_reward, self.step) if self.log_success: self.logger.log('eval/success_rate', success_rate, self.step) self.logger.log('train/true_episode_success', success_rate, self.step) self.logger.dump(self.step) def train_reward_on_preferences(self) -> t.Optional[float]: """ Update the reward model on the current preference dataset Returns: train accuracy on the current reward model update round, if the preference dataset contains samples """ # create the data loader that will be used to train the reward model
preference_data_loader = PreferenceTripletEnsembleDataLoader(
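The next_line field continues train_reward_on_preferences by constructing the ensemble data loader defined in the context snippets above. A hedged usage sketch, assuming the reed package from this record is importable and substituting a dummy in-memory dataset for the real PreferenceDataset:

import numpy as np
import torch
from torch.utils.data import Dataset

from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader

class DummyPreferenceDataset(Dataset):
    """Hypothetical stand-in yielding (trajectory_one, trajectory_two, preference_label)."""
    def __len__(self):
        return 16
    def __getitem__(self, index):
        return (np.zeros((50, 7), dtype=np.float32),
                np.zeros((50, 7), dtype=np.float32),
                np.array(0))

loader = PreferenceTripletEnsembleDataLoader(
    dataset=DummyPreferenceDataset(), ensemble_size=3,
    batch_size=4, shuffle=True, device="cpu")
for member_batches in loader:
    # one independently shuffled PreferenceTripletBatch per ensemble member
    assert len(member_batches) == 3
    first = member_batches[0]
    assert first.trajectories_one.shape == (4, 50, 7)
    assert first.preference_labels.dtype == torch.long

The per-member shuffling is the point of the wrapper: each reward network in the ensemble sees the same triplets in a different batch order.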
5
2023-11-06 23:14:20+00:00
24k
alibaba/animate-anything
train.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: str = \"./data\",\n video_json: str = \"\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n cache_latents = False,\n motion_threshold = 50,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n self.video_dir = video_dir\n self.video_files = json.load(open(video_json))\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n self.cache_latents = cache_latents\n self.motion_threshold = motion_threshold\n self.transform = T.Compose([\n #T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False),\n T.Resize(min(height, width), antialias=False),\n T.CenterCrop([height, width])\n ])\n\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n \n @staticmethod\n def __getname__(): return 'video_json'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n mask = None\n try:\n item = self.video_files[index]\n video_path = os.path.join(self.video_dir, item['video'])\n cache_path = os.path.splitext(video_path)[0] + '.pt'\n if self.cache_latents and os.path.exists(cache_path):\n return torch.load(cache_path, map_location='cpu')\n\n prompt = item['caption']\n if self.fallback_prompt == \"<no_text>\":\n prompt = \"\"\n vr = decord.VideoReader(video_path)\n video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)\n except Exception as err:\n print(\"read video error\", err, video_path)\n return self.__getitem__(index+1)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n example = {\n \"pixel_values\": normalize_input(video), \n \"prompt_ids\": prompt_ids, \n \"text_prompt\": prompt, \n 'cache_path': cache_path,\n 'dataset': self.__getname__()\n }\n mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())\n example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy())\n if example['motion'] < self.motion_threshold:\n return self.__getitem__(random.randint(0, len(self)-1))\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n # Create a list of frames separated by sample frames\n # [(1,2,3), (4,5,6), ...]\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(1, len(vr), self.frame_step)\n\n self.frames = list(self.chunk(vr_range, 
self.n_sample_frames))\n\n # Delete any list that contains an out of range index.\n for i, inner_frame_nums in enumerate(self.frames):\n for frame_num in inner_frame_nums:\n if frame_num > len(vr):\n print(f\"Removing out of range index list at position: {i}...\")\n del self.frames[i]\n\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": normalize_input(video),\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n #self.image_dir = self.get_images_list(image_dir)\n self.image_dir_path = image_dir\n self.image_dir = json.load(open(kwargs['image_json']))\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img, prompt = train_data['image'], train_data['caption']\n img = os.path.join(self.image_dir_path, img)\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), 
antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=1)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n return len(self.image_dir)\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": normalize_input(img),\n \"frames\": img,\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n raise RuntimeError(\"not enough frames\")\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n try:\n video, _ = self.process_video_wrapper(self.video_files[index])\n except Exception as err:\n print(\"read video error\", self.video_files[index])\n video, _ = self.process_video_wrapper(self.video_files[index+1])\n\n if os.path.exists(self.video_files[index].replace(\".mp4\", \".txt\")):\n with open(self.video_files[index].replace(\".mp4\", \".txt\"), \"r\") as f:\n lines = f.readlines()\n prompt = random.choice(lines)\n else:\n prompt = self.fallback_prompt\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return {\"pixel_values\": normalize_input(video[0]), \"frames\": video[0],\n \"prompt_ids\": prompt_ids, \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n 
def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "VideoBLIPDataset", "path": "utils/dataset.py", "snippet": "class VideoBLIPDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n fps: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n cache_latents: bool = False,\n motion_threshold = 50,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n self.cache_latents = cache_latents\n self.motion_threshold = motion_threshold\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.fps = fps\n self.transform = T.Compose([\n #T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)\n T.Resize(min(height, width), antialias=False),\n T.CenterCrop([height, width])\n ])\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n import traceback\n traceback.print_exc()\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def train_data_batch(self, index):\n vid_data = self.train_data[index]\n # Get video prompt\n prompt = vid_data['prompt']\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n clip_path = vid_data['clip_path']\n else:\n clip_path = vid_data[self.vid_data_key]\n # Get the frame of the current index.\n self.sample_start_idx = vid_data['frame_index']\n cache_path = os.path.splitext(clip_path)[0] + '.pt'\n if self.cache_latents and os.path.exists(cache_path):\n return torch.load(cache_path, map_location='cpu')\n\n vr = decord.VideoReader(clip_path)\n video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n example = {\n \"pixel_values\": normalize_input(video),\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt,\n 'dataset': self.__getname__(),\n 'cache_path': cache_path,\n }\n mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())\n example['mask'] = mask\n example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy())\n return example\n \n\n @staticmethod\n def __getname__(): return 'video_blip'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n example = self.train_data_batch(index)\n if example['motion'] < self.motion_threshold:\n return self.__getitem__(random.randint(0, len(self)-1))\n return example" }, { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. 
Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n motion_mask = False,\n motion_strength = False,\n ):\n super().__init__()\n self.motion_mask = motion_mask\n self.motion_strength = motion_strength\n print(f\"motion mask {self.motion_mask}, motion_strength {self.motion_strength}\")\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n self.conv_in2 = nn.Conv2d(\n 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n cond_proj_dim=block_out_channels[0],\n )\n\n self.motion_proj = Timesteps(block_out_channels[0], True, 0)\n self.motion_embedding = nn.Sequential(\n nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim))\n nn.init.zeros_(self.motion_embedding[-1].weight)\n nn.init.zeros_(self.motion_embedding[-1].bias)\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n 
num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value \n \n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n condition_latent: torch.Tensor,\n mask: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n motion = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n sample = torch.cat([condition_latent, sample], dim=2)\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n if self.motion_strength and motion is not None:\n timestep_cond = self.motion_proj(motion).to(dtype=self.dtype)\n emb = self.time_embedding(t_emb, timestep_cond)\n #emb += self.motion_embedding(m_emb)\n else:\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n if self.motion_mask and mask is not None:\n mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2])\n sample = torch.cat([mask, sample], dim=1)\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in2(sample)\n else:\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n\n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. 
down\n down_block_res_samples = (sample,)\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n \n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n sample = sample[:,:,1:]\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "LatentToVideoPipeline", "path": "models/pipeline.py", "snippet": "class LatentToVideoPipeline(TextToVideoSDPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt = None,\n height= None,\n width= None,\n num_frames: int = 16,\n num_inference_steps: int = 50,\n guidance_scale= 9.0,\n negative_prompt= None,\n eta: float = 0.0,\n generator= None,\n latents= None,\n prompt_embeds= None,\n negative_prompt_embeds= None,\n output_type= \"np\",\n return_dict: bool = True,\n callback= None,\n callback_steps: int = 1,\n cross_attention_kwargs= None,\n condition_latent=None,\n mask=None,\n timesteps=None,\n motion=None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated video.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated video.\n num_frames (`int`, *optional*, defaults to 16):\n The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds\n amounts to 2 seconds of video.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality videos at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate videos that are closely linked to the text `prompt`,\n usually at the expense of lower video quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the video generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`. 
Latents should be of shape\n `(batch_size, num_channel, num_frames, height, width)`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"np\"`):\n The output format of the generate video. Choose between `torch.FloatTensor` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated frames.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_images_per_prompt = 1\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n #device = self._execution_device\n device = latents.device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n if timesteps is None:\n timesteps = self.scheduler.timesteps\n else:\n num_inference_steps = len(timesteps)\n # 5. Prepare latent variables. 
do nothing\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n uncondition_latent = condition_latent\n condition_latent = torch.cat([uncondition_latent, condition_latent]) if do_classifier_free_guidance else condition_latent \n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n if motion is not None:\n motion = torch.tensor(motion, device=device)\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n condition_latent=condition_latent,\n mask=mask,\n motion=motion\n ).sample\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # reshape latents\n bsz, channel, frames, width, height = latents.shape\n latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # reshape latents back\n latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n video_tensor = self.decode_latents(latents)\n\n if output_type == \"pt\":\n video = video_tensor\n else:\n video = tensor2vid(video_tensor)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (video, latents)\n\n return TextToVideoSDPipelineOutput(frames=video)" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = ['UNet3DConditionModel'],\n text_encoder_replace_modules: list = ['CLIPEncoderLayer']\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n if self.use_lora:\n print(f\"Using LoRA Version: {self.version}\")\n\n def is_cloneofsimo_lora(self):\n return 
self.version == LoraVersions.cloneofsimo\n\n def is_stable_lora(self):\n return self.version == LoraVersions.stable_lora\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n\n if self.is_stable_lora():\n\n if func_type == LoraFuncTypes.loader:\n return load_lora\n\n if func_type == LoraFuncTypes.injector:\n return add_lora_to\n \n raise ValueError(\"LoRA Version does not exist.\")\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r\n })\n\n if self.is_stable_lora():\n KEYS = ['model', 'lora_path']\n return_dict = filter_dict(return_dict, KEYS)\n \n return_dict.update({'model': model, 'lora_path': lora_path})\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) \n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n if self.is_stable_lora():\n injector_args = lora_args.copy()\n injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS)\n\n SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding]\n\n injector_args.update({\n \"model\": model,\n \"target_module\": REPLACE_MODULES,\n \"search_class\": SEARCH_CLASS,\n \"r\": r,\n \"dropout\": dropout,\n \"lora_bias\": self.lora_bias\n })\n\n activator = self.lora_injector(**injector_args)\n activator()\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias\n )\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n \n\n def deactivate_lora_train(self, models, deactivate=True):\n \"\"\"\n Usage: Use before and after sampling previews.\n Currently only available for Stable LoRA.\n \"\"\"\n if self.is_stable_lora():\n set_mode_group(models, not deactivate)\n\n def save_cloneofsimo_lora(self, model, save_path, step):\n \n def save_lora(model, name, condition, replace_modules, step, save_path): \n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path, \n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path\n )\n\n train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_stable_lora(\n self, \n model, \n step, \n name, \n save_path = '', \n 
save_for_webui=False,\n only_for_webui=False\n ):\n import uuid\n\n save_filename = f\"{step}_{name}\"\n lora_metadata = metadata = {\n \"stable_lora_text_to_video\": \"v1\", \n \"lora_name\": name + \"_\" + uuid.uuid4().hex.lower()[:5]\n }\n save_lora(\n unet=model.unet,\n text_encoder=model.text_encoder,\n save_text_weights=self.use_text_lora,\n output_dir=save_path,\n lora_filename=save_filename,\n lora_bias=self.lora_bias,\n save_for_webui=self.save_for_webui,\n only_webui=self.only_for_webui,\n metadata=lora_metadata,\n unet_dict_converter=convert_unet_state_dict,\n text_dict_converter=convert_text_enc_state_dict_v20\n )\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = ''):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step)\n\n if self.is_stable_lora():\n name = 'lora_text_to_video'\n self.save_stable_lora(model, step, name, save_path)" }, { "identifier": "LORA_VERSIONS", "path": "utils/lora_handler.py", "snippet": "LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo]" }, { "identifier": "read_mask", "path": "utils/common.py", "snippet": "def read_mask(json_path, label=[\"mask\"]):\n j = json.load(open(json_path)) \n if type(label) != list:\n labels = [label]\n height = j['imageHeight']\n width = j['imageWidth']\n mask = np.zeros([height, width], dtype=np.uint8)\n for shape in j['shapes']:\n if shape['label'] in label:\n x1, y1 = shape['points'][0]\n x2, y2 = shape['points'][1]\n mask[int(y1):int(y2), int(x1):int(x2)] = 255\n return mask" }, { "identifier": "generate_random_mask", "path": "utils/common.py", "snippet": "def generate_random_mask(image):\n # Create a blank mask with the same size as the image\n b, c , h, w = image.shape\n mask = np.zeros([b, h, w], dtype=np.uint8)\n \n # Generate random coordinates for the mask\n num_points = np.random.randint(3, 10) # Randomly choose the number of points to generate\n points = np.random.randint(0, min(h, w), size=(num_points, 2)) # Randomly generate the points\n # Draw a filled polygon on the mask using the random points\n for i in range(b):\n width = random.randint(w//4, w)\n height = random.randint(h//4, h)\n x = random.randint(0, w-width)\n y = random.randint(0, h-height)\n points=np.array([[x, y], [x+width, y], [x+width, y+height], [x, y+height]])\n mask[i] = cv2.fillPoly(mask[i], [points], 255)\n \n # Apply the mask to the image\n #masked_image = cv2.bitwise_and(image, image, mask=mask)\n return mask " }, { "identifier": "slerp", "path": "utils/common.py", "snippet": "def slerp(z1, z2, alpha):\n theta = torch.acos(torch.sum(z1 * z2) / (torch.norm(z1) * torch.norm(z2)))\n return (\n torch.sin((1 - alpha) * theta) / torch.sin(theta) * z1\n + torch.sin(alpha * theta) / torch.sin(theta) * z2\n )" }, { "identifier": "calculate_motion_score", "path": "utils/common.py", "snippet": "def calculate_motion_score(frame_imgs, calculate_edges=False, color=\"RGB\") -> float:\n # Convert image into HSV colorspace.\n _last_frame = None\n\n _weights = [1.0, 1.0, 1.0, 0.0]\n score = 0\n for frame_img in frame_imgs:\n if color == \"RGB\":\n hue, sat, lum = cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_RGB2HSV))\n else:\n hue, sat, lum = 
cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_BGR2HSV))\n # Performance: Only calculate edges if we have to.\n edges = _detect_edges(lum) if calculate_edges else None\n if _last_frame is None:\n _last_frame = (hue, sat, lum, edges)\n continue\n\n score_components = [\n _mean_pixel_distance(hue, _last_frame[0]),\n _mean_pixel_distance(sat, _last_frame[1]),\n _mean_pixel_distance(lum, _last_frame[2]),\n 0.0 if edges is None else _mean_pixel_distance(edges, _last_frame[3]),\n ]\n\n frame_score: float = (\n sum(component * weight for (component, weight) in zip(score_components, _weights))\n / sum(abs(weight) for weight in _weights))\n score += frame_score\n _last_frame = (hue, sat, lum, edges)\n\n return round(score/(len(frame_imgs)-1) * 10)" }, { "identifier": "read_video", "path": "utils/common.py", "snippet": "def read_video(video_path, frame_number=-1):\n # Open the video file\n cap = cv2.VideoCapture(video_path)\n count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) \n if frame_number == -1:\n frame_number = count\n else:\n frame_number = min(frame_number, count)\n frames = []\n for i in range(frame_number):\n ret, ref_frame = cap.read()\n if not ret:\n raise ValueError(\"Failed to read video file\")\n ref_frame = cv2.cvtColor(ref_frame, cv2.COLOR_BGR2RGB)\n frames.append(ref_frame)\n return frames" }, { "identifier": "calculate_motion_precision", "path": "utils/common.py", "snippet": "def calculate_motion_precision(frames, mask):\n moved_mask = get_moved_area_mask(frames, move_th=20, th=0)\n moved = moved_mask == 255\n gt = mask == 255\n precision = np.sum(moved & gt) / np.sum(moved)\n return precision" }, { "identifier": "calculate_latent_motion_score", "path": "utils/common.py", "snippet": "def calculate_latent_motion_score(latents):\n #latents b, c f, h, w\n diff=torch.abs(latents[:,:,1:]-latents[:,:,:-1])\n motion_score = torch.sum(torch.mean(diff, dim=[2,3,4]), dim=1) * 10\n return motion_score" }, { "identifier": "DDPM_forward", "path": "utils/common.py", "snippet": "def DDPM_forward(x0, step, num_frames, scheduler):\n device = x0.device\n t = scheduler.timesteps[-1]\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n\n eps = torch.randn_like(xt)\n alpha_vec = torch.prod(scheduler.alphas[t:])\n xt = torch.sqrt(alpha_vec) * xt + torch.sqrt(1-alpha_vec) * eps\n return xt, None" }, { "identifier": "DDPM_forward_timesteps", "path": "utils/common.py", "snippet": "def DDPM_forward_timesteps(x0, step, num_frames, scheduler):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n\n device = x0.device\n # timesteps are reversed\n timesteps = scheduler.timesteps[len(scheduler.timesteps)-step:]\n t = timesteps[0]\n\n if x0.shape[2] == 1:\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n else:\n xt = x0\n noise = torch.randn(xt.shape, dtype=xt.dtype, device=device)\n # t to tensor of batch size \n t = torch.tensor([t]*xt.shape[0], device=device)\n xt = scheduler.add_noise(xt, noise, t)\n return xt, timesteps" }, { "identifier": "DDPM_forward_mask", "path": "utils/common.py", "snippet": "def DDPM_forward_mask(x0, step, num_frames, scheduler, mask):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n device = x0.device\n dtype = x0.dtype\n b, c, f, h, w = x0.shape\n\n move_xt, timesteps = DDPM_forward_timesteps(x0, step, num_frames, scheduler)\n mask = T.ToTensor()(mask).to(dtype).to(device)\n mask = T.Resize([h, w], antialias=False)(mask)\n mask = rearrange(mask, 'b h w -> b 1 1 h w')\n freeze_xt = repeat(x0, 'b c 1 h
w -> b c f h w', f = num_frames)\n initial = freeze_xt * (1-mask) + move_xt * mask\n return initial, timesteps" }, { "identifier": "motion_mask_loss", "path": "utils/common.py", "snippet": "def motion_mask_loss(latents, mask):\n diff = torch.abs(latents[:,:,1:] - latents[:,:,:-1])\n loss = torch.sum(torch.mean(diff * (1-mask), dim=[2,3,4]), dim=1)\n return loss" }, { "identifier": "generate_center_mask", "path": "utils/common.py", "snippet": "def generate_center_mask(image):\n # Create a blank mask with the same size as the image\n b, c , h, w = image.shape\n mask = np.zeros([b, h, w], dtype=np.uint8)\n \n # Generate random coordinates for the mask\n for i in range(b):\n width = int(w/10)\n height = int(h/10)\n mask[i][height:-height,width:-width] = 255\n # Apply the mask to the image\n #masked_image = cv2.bitwise_and(image, image, mask=mask)\n return mask " }, { "identifier": "tensor_to_vae_latent", "path": "utils/common.py", "snippet": "def tensor_to_vae_latent(t, vae):\n video_length = t.shape[1]\n\n t = rearrange(t, \"b f c h w -> (b f) c h w\")\n latents = vae.encode(t).latent_dist.sample()\n latents = rearrange(latents, \"(b f) c h w -> b c f h w\", f=video_length)\n latents = latents * 0.18215\n\n return latents" } ]
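The snippet list above closes with tensor_to_vae_latent, which folds frames into the batch dimension, encodes with the VAE, and applies the standard Stable Diffusion 0.18215 scaling. For orientation, a minimal sketch of the inverse direction; the helper name is hypothetical and only the scale factor is taken from the snippet:

import torch
from einops import rearrange

def vae_latent_to_tensor(latents, vae):
    # Inverse sketch of tensor_to_vae_latent: undo the 0.18215 scaling,
    # fold frames into the batch dimension, decode, then unfold again.
    video_length = latents.shape[2]
    latents = rearrange(latents, "b c f h w -> (b f) c h w") / 0.18215
    frames = vae.decode(latents).sample
    return rearrange(frames, "(b f) c h w -> b f c h w", f=video_length)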
import argparse
import datetime
import logging
import inspect
import math
import os
import json
import gc
import copy
import random
import cv2
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import torchvision.transforms as T
import diffusers
import transformers
import numpy as np
import imageio
import itertools
import bitsandbytes as bnb

from typing import Dict, Optional, Tuple
from omegaconf import OmegaConf
from tqdm.auto import tqdm
from PIL import Image
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers.models import AutoencoderKL
from diffusers import DPMSolverMultistepScheduler, DDPMScheduler
from diffusers.image_processor import VaeImageProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version, export_to_video
from diffusers.utils.import_utils import is_xformers_available
from diffusers.models.attention_processor import AttnProcessor2_0, Attention
from diffusers.models.attention import BasicTransformerBlock
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid
from transformers import CLIPTextModel, CLIPTokenizer
from transformers.models.clip.modeling_clip import CLIPEncoder
from utils.dataset import VideoJsonDataset, SingleVideoDataset, \
    ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset
from einops import rearrange, repeat
from models.unet_3d_condition_mask import UNet3DConditionModel
from models.pipeline import LatentToVideoPipeline
from utils.lora_handler import LoraHandler, LORA_VERSIONS
from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \
    read_video, calculate_motion_precision, calculate_latent_motion_score, \
    DDPM_forward, DDPM_forward_timesteps, DDPM_forward_mask, motion_mask_loss, \
    generate_center_mask, tensor_to_vae_latent
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
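A hedged usage sketch tying these imports to the DDPM_forward_timesteps helper from the snippet list; the checkpoint path and latent shape are placeholders, not values from this row:

# Partially noise a single-frame latent into a num_frames clip.
scheduler = DDPMScheduler.from_pretrained("./models/zeroscope_v2_576w", subfolder="scheduler")
scheduler.set_timesteps(50)  # DDPM_forward_timesteps indexes scheduler.timesteps
x0 = torch.randn(1, 4, 1, 32, 32)  # (batch, channels, frames=1, h, w), placeholder shape
xt, timesteps = DDPM_forward_timesteps(x0, step=30, num_frames=16, scheduler=scheduler)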
19,661
    return {
        "model": model,
        "condition": condition,
        'extra_params': extra_params,
        'is_lora': is_lora,
        "negation": negation
    }


def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None):
    params = {
        "name": name,
        "params": params,
        "lr": lr
    }
    if extra_params is not None:
        for k, v in extra_params.items():
            params[k] = v
    return params


def negate_params(name, negation):
    # We have to do this if we are co-training with LoRA.
    # This ensures that parameter groups aren't duplicated.
    if negation is None:
        return False
    for n in negation:
        if n in name and 'temp' not in name:
            return True
    return False


def create_optimizer_params(model_list, lr):
    optimizer_params = []
    for optim in model_list:
        model, condition, extra_params, is_lora, negation = optim.values()
        # Check if we are doing LoRA training.
        if is_lora and condition and isinstance(model, list):
            params = create_optim_params(
                params=itertools.chain(*model),
                extra_params=extra_params
            )
            optimizer_params.append(params)
            continue
        if is_lora and condition and not isinstance(model, list):
            for n, p in model.named_parameters():
                if 'lora' in n:
                    params = create_optim_params(n, p, lr, extra_params)
                    optimizer_params.append(params)
            continue
        # If this is true, we can train it.
        if condition:
            for n, p in model.named_parameters():
                should_negate = 'lora' in n and not is_lora
                if should_negate:
                    continue
                params = create_optim_params(n, p, lr, extra_params)
                optimizer_params.append(params)
    return optimizer_params


def get_optimizer(use_8bit_adam):
    if use_8bit_adam:
        try:
            import bitsandbytes as bnb  # lazy import so a missing install raises ImportError here
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )
        return bnb.optim.AdamW8bit
    else:
        return torch.optim.AdamW


def is_mixed_precision(accelerator):
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
    return weight_dtype


def cast_to_gpu_and_type(model_list, device, weight_dtype):
    for model in model_list:
        if model is not None:
            model.to(device, dtype=weight_dtype)


def handle_cache_latents(
    should_cache,
    output_dir,
    train_dataloader,
    train_batch_size,
    vae,
    cached_latent_dir=None,
    shuffle=False
):
    # Cache latents by storing them in VRAM.
    # Speeds up training and saves memory by not encoding during the train loop.
    if not should_cache:
        return None
    vae.to('cuda', dtype=torch.float16)
    vae.enable_slicing()

    cached_latent_dir = (
        os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None
    )

    if cached_latent_dir is None:
        cache_save_dir = f"{output_dir}/cached_latents"
        os.makedirs(cache_save_dir, exist_ok=True)

        for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")):
            save_name = f"cached_{i}"
            full_out_path = f"{cache_save_dir}/{save_name}.pt"
            pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16)
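The code block above is the truncated completion prompt for this row; it stops inside the latent-caching loop, one line before the gold continuation recorded a few fields below. A hedged sketch of how the loop plausibly proceeds from that point (only the first line is attested by this row; the per-key squeeze and the torch.save call are assumptions):

            # gold continuation recorded by this row
            batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae)
            # plausible remainder (assumption): strip the batch dimension and persist
            for k, v in batch.items():
                batch[k] = v[0]
            torch.save(batch, full_out_path)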
already_printed_trainables = False

logger = get_logger(__name__, log_level="INFO")


def create_logging(logging, logger, accelerator):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)


def accelerate_set_verbose(accelerator):
    if accelerator.is_local_main_process:
        transformers.utils.logging.set_verbosity_warning()
        diffusers.utils.logging.set_verbosity_info()
    else:
        transformers.utils.logging.set_verbosity_error()
        diffusers.utils.logging.set_verbosity_error()


def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []
    dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset]
    dataset_map = {d.__getname__(): d for d in dataset_cls}

    # Loop through all available datasets, get the name, then add to list of data to process.
    for dataset in dataset_types:
        if dataset in dataset_map:
            train_datasets.append(dataset_map[dataset](**train_data, tokenizer=tokenizer))
        else:
            raise ValueError(f"Dataset type not found: {dataset} not in {dataset_map.keys()}")
    return train_datasets


def extend_datasets(datasets, dataset_items, extend=False):
    biggest_data_len = max(x.__len__() for x in datasets)
    extended = []
    for dataset in datasets:
        if dataset.__len__() == 0:
            del dataset
            continue
        if dataset.__len__() < biggest_data_len:
            for item in dataset_items:
                if extend and item not in extended and hasattr(dataset, item):
                    print(f"Extending {item}")
                    value = getattr(dataset, item)
                    value *= biggest_data_len
                    value = value[:biggest_data_len]
                    setattr(dataset, item, value)
                    print(f"New {item} dataset length: {dataset.__len__()}")
                    extended.append(item)


def export_to_video(video_frames, output_video_path, fps):
    # Shadows the export_to_video imported from diffusers.utils.
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    h, w, _ = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h))
    for i in range(len(video_frames)):
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)


def create_output_folders(output_dir, config):
    now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    out_dir = os.path.join(output_dir, f"train_{now}")

    os.makedirs(out_dir, exist_ok=True)
    os.makedirs(f"{out_dir}/samples", exist_ok=True)
    OmegaConf.save(config, os.path.join(out_dir, 'config.yaml'))

    return out_dir


def load_primary_models(pretrained_model_path, motion_mask, motion_strength):
    noise_scheduler = DDPMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler")
    tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
    vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
    unet = UNet3DConditionModel.from_pretrained(
        pretrained_model_path, subfolder="unet", low_cpu_mem_usage=False,
        device_map=None, motion_mask=motion_mask, motion_strength=motion_strength)
    if pretrained_model_path.endswith('zeroscope_v2_576w'):
        # first time init, modify unet conv in2
        unet.conv_in2.bias.data = copy.deepcopy(unet.conv_in.bias)
        torch.nn.init.zeros_(unet.conv_in2.weight)
        unet.conv_in2.weight.data[:, 1:] = copy.deepcopy(unet.conv_in.weight)
    return noise_scheduler, tokenizer, text_encoder, vae, unet


def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable):
    unet._set_gradient_checkpointing(value=unet_enable)
    if text_enable:
        text_encoder.gradient_checkpointing_enable()
    else:
        text_encoder.gradient_checkpointing_disable()


def freeze_models(models_to_freeze):
    for model in models_to_freeze:
        if model is not None:
            model.requires_grad_(False)


def is_attn(name):
    # NOTE: 'attn1' is truthy, so this expression always evaluates True;
    # the isinstance checks in set_torch_2_attn do the actual filtering.
    return ('attn1' or 'attn2' == name.split('.')[-1])


def set_processors(attentions):
    for attn in attentions:
        attn.set_processor(AttnProcessor2_0())


def set_torch_2_attn(unet):
    optim_count = 0
    for name, module in unet.named_modules():
        if is_attn(name):
            if isinstance(module, torch.nn.ModuleList):
                for m in module:
                    if isinstance(m, BasicTransformerBlock):
                        set_processors([m.attn1, m.attn2])
                        optim_count += 1
    if optim_count > 0:
        print(f"{optim_count} Attention layers using Scaled Dot Product Attention.")


def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet):
    try:
        is_torch_2 = hasattr(F, 'scaled_dot_product_attention')
        enable_torch_2 = is_torch_2 and enable_torch_2_attn

        if enable_xformers_memory_efficient_attention and not enable_torch_2:
            if is_xformers_available():
                unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
            else:
                raise ValueError("xformers is not available. Make sure it is installed correctly")

        if enable_torch_2:
            set_torch_2_attn(unet)
    except Exception:
        print("Could not enable memory efficient attention for xformers or Torch 2.0.")


def param_optim(model, condition, extra_params=None, is_lora=False, negation=None):
    extra_params = extra_params if len(extra_params.keys()) > 0 else None
    return {
        "model": model,
        "condition": condition,
        'extra_params': extra_params,
        'is_lora': is_lora,
        "negation": negation
    }


def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None):
    params = {
        "name": name,
        "params": params,
        "lr": lr
    }
    if extra_params is not None:
        for k, v in extra_params.items():
            params[k] = v
    return params


def negate_params(name, negation):
    # We have to do this if we are co-training with LoRA.
    # This ensures that parameter groups aren't duplicated.
    if negation is None:
        return False
    for n in negation:
        if n in name and 'temp' not in name:
            return True
    return False


def create_optimizer_params(model_list, lr):
    optimizer_params = []
    for optim in model_list:
        model, condition, extra_params, is_lora, negation = optim.values()
        # Check if we are doing LoRA training.
        if is_lora and condition and isinstance(model, list):
            params = create_optim_params(
                params=itertools.chain(*model),
                extra_params=extra_params
            )
            optimizer_params.append(params)
            continue
        if is_lora and condition and not isinstance(model, list):
            for n, p in model.named_parameters():
                if 'lora' in n:
                    params = create_optim_params(n, p, lr, extra_params)
                    optimizer_params.append(params)
            continue
        # If this is true, we can train it.
        if condition:
            for n, p in model.named_parameters():
                should_negate = 'lora' in n and not is_lora
                if should_negate:
                    continue
                params = create_optim_params(n, p, lr, extra_params)
                optimizer_params.append(params)
    return optimizer_params


def get_optimizer(use_8bit_adam):
    if use_8bit_adam:
        try:
            import bitsandbytes as bnb  # lazy import so a missing install raises ImportError here
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )
        return bnb.optim.AdamW8bit
    else:
        return torch.optim.AdamW


def is_mixed_precision(accelerator):
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
    return weight_dtype


def cast_to_gpu_and_type(model_list, device, weight_dtype):
    for model in model_list:
        if model is not None:
            model.to(device, dtype=weight_dtype)


def handle_cache_latents(
    should_cache,
    output_dir,
    train_dataloader,
    train_batch_size,
    vae,
    cached_latent_dir=None,
    shuffle=False
):
    # Cache latents by storing them in VRAM.
    # Speeds up training and saves memory by not encoding during the train loop.
    if not should_cache:
        return None
    vae.to('cuda', dtype=torch.float16)
    vae.enable_slicing()

    cached_latent_dir = (
        os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None
    )

    if cached_latent_dir is None:
        cache_save_dir = f"{output_dir}/cached_latents"
        os.makedirs(cache_save_dir, exist_ok=True)

        for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")):
            save_name = f"cached_{i}"
            full_out_path = f"{cache_save_dir}/{save_name}.pt"
            pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16)
batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae)
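The single gold next line above is what a completion model is scored against. A minimal scoring sketch; whitespace-insensitive exact match is an assumed metric, not one specified by this dump:

def exact_match(prediction: str, gold: str) -> bool:
    # Compare modulo whitespace, since completions often differ only in spacing.
    return " ".join(prediction.split()) == " ".join(gold.split())

assert exact_match(
    "batch['pixel_values']   = tensor_to_vae_latent(pixel_values, vae)",
    "batch['pixel_values'] = tensor_to_vae_latent(pixel_values, vae)",
)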
22
2023-12-07 08:26:29+00:00
24k
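Each row closes with scalar metadata like the values just above before the next row begins. A sketch of consuming rows shaped like this one; the dataset path and column names are assumptions inferred from the dump, not an official loader:

from datasets import load_dataset

ds = load_dataset("path/to/this-dataset", split="train", streaming=True)
for row in ds:
    # Build the completion prompt from the import block plus the truncated code,
    # then compare a model's output against the recorded gold next line.
    prompt = row["import_statement"] + "\n" + row["cropped_code"]
    gold = row["next_line"]
    print(row["repo_name"], row["file_path"], len(prompt), repr(gold))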
modelscope/richdreamer
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> BaseGeometry:\n return other" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def initialize_shape(self) -> None:\n pass\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # 
points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n \"points_scaled\": points.view(-1, self.cfg.n_input_dims),\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "get_rank", "path": "threestudio/utils/misc.py", "snippet": "def get_rank():\n # SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,\n # therefore LOCAL_RANK needs to be checked first\n rank_keys = (\"RANK\", \"LOCAL_RANK\", \"SLURM_PROCID\", \"JSM_NAMESPACE_RANK\")\n for key in rank_keys:\n rank = 
os.environ.get(key)\n if rank is not None:\n return int(rank)\n return 0" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
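The `ImplicitSDF` snippet in the context list above estimates normals by finite differences over the SDF. A minimal self-contained sketch of the six-point central-difference scheme used in its `finite_difference_laplacian` branch (the `sphere_sdf` field here is a stand-in assumption, not part of the repo):

```python
import torch
import torch.nn.functional as F

def sphere_sdf(points: torch.Tensor, radius: float = 0.5) -> torch.Tensor:
    # Stand-in SDF: signed distance to a sphere of the given radius.
    return points.norm(dim=-1, keepdim=True) - radius

def finite_difference_normal(points: torch.Tensor, eps: float = 1e-3) -> torch.Tensor:
    # Central differences along each axis:
    # grad_i ~= (sdf(p + eps*e_i) - sdf(p - eps*e_i)) / (2 * eps)
    offsets = torch.as_tensor(
        [[eps, 0.0, 0.0], [-eps, 0.0, 0.0],
         [0.0, eps, 0.0], [0.0, -eps, 0.0],
         [0.0, 0.0, eps], [0.0, 0.0, -eps]],
    ).to(points)
    sdf_offset = sphere_sdf(points[..., None, :] + offsets)   # (..., 6, 1)
    grad = 0.5 * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0]) / eps
    return F.normalize(grad, dim=-1)

pts = torch.rand(8, 3) * 2.0 - 1.0
normals = finite_difference_normal(pts)  # approximate outward unit normals
```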
import numpy as np import os import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import (BaseExplicitGeometry, BaseGeometry, contract_to_unisphere,) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast, get_rank from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
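`scale_tensor` (the last entry in the context list above) linearly remaps values from one range to another; `TetrahedraSDFGrid` uses it to move tet-grid vertices from the helper's canonical `points_range` into the isosurface bounding box. A tiny hedged sketch of the same arithmetic, assuming the canonical range is [0, 1]:

```python
import torch

grid_vertices = torch.rand(5, 3)  # canonical grid vertices in [0, 1]^3 (assumed range)

inp_scale = (0.0, 1.0)                                       # source range
bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])   # target per-axis range

# Equivalent to scale_tensor(grid_vertices, inp_scale, bbox):
dat = (grid_vertices - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
world_vertices = dat * (bbox[1] - bbox[0]) + bbox[0]         # now in [-1, 1]^3
```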
15175
if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
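The `mesh:` initialization branch in `cropped_code` above negates the pysdf output because pysdf reports positive distances inside a shape, while this code treats inside as negative. A short sketch of that convention check (assumes `pysdf` and `trimesh` are installed; the cube mesh is illustrative):

```python
import numpy as np
import trimesh
from pysdf import SDF

mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))  # unit cube centered at origin
sdf = SDF(mesh.vertices, mesh.faces)

inside = np.array([[0.0, 0.0, 0.0]], dtype=np.float32)
outside = np.array([[2.0, 0.0, 0.0]], dtype=np.float32)

# pysdf: positive inside, so negate to get the usual "negative inside" SDF.
assert sdf(inside)[0] > 0 and sdf(outside)[0] < 0
signed_inside, signed_outside = -sdf(inside)[0], -sdf(outside)[0]
```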
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 nerf_scale: float = 1.0 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False # sdf_bias: Union[float, str] = 0.0 # sdf_bias_params: Optional[Any] = None cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert 
isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
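The ellipsoid branch above (and the matching `sdf_bias` branch in `ImplicitSDF`) uses a "pseudo signed distance": it is exact only for a sphere, but for an ellipsoid it still has the correct sign and zero level set. A standalone sketch:

```python
import torch

def pseudo_sdf_ellipsoid(points: torch.Tensor, size) -> torch.Tensor:
    # Zero on the ellipsoid (x/a)^2 + (y/b)^2 + (z/c)^2 = 1,
    # negative inside, positive outside; not a true metric distance.
    size = torch.as_tensor(size).to(points)
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
vals = pseudo_sdf_ellipsoid(pts, (1.0, 0.7, 0.5))  # ~[-1.0, 0.0, 1.0]
```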
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
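The gold `next_line` above calls `contract_to_unisphere` without the `unbounded` flag; in the bounded case this reduces to rescaling points from the bbox into (0, 1). A hedged sketch of that bounded path (the unbounded branch additionally applies a radial contraction and is omitted here):

```python
import torch

def contract_bounded(points: torch.Tensor, bbox: torch.Tensor) -> torch.Tensor:
    # bbox is (2, 3): row 0 = min corner, row 1 = max corner.
    return (points - bbox[0]) / (bbox[1] - bbox[0])

bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, -1.0, 0.5]])
print(contract_bounded(pts, bbox))  # [[0.5, 0.5, 0.5], [1.0, 0.0, 0.75]]
```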
2
2023-12-06 07:53:11+00:00
24k
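Each record in this dataset pairs a truncated file (`cropped_code`) with the gold continuation (`next_line`) and the index of the context snippet that supports it (`gold_snippet_index`). A minimal hedged sketch of exact-match scoring over such records (field names follow the schema; the `records` list and whitespace normalization are illustrative choices, not part of the dataset):

```python
def exact_match(prediction: str, gold: str) -> bool:
    # Whitespace-insensitive comparison of a predicted line with next_line.
    return " ".join(prediction.split()) == " ".join(gold.split())

records = [
    {
        "next_line": "points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)",
        "gold_snippet_index": 2,
    },
]
preds = ["points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)"]

accuracy = sum(
    exact_match(p, r["next_line"]) for p, r in zip(preds, records)
) / len(records)
print(f"exact match: {accuracy:.2f}")  # 1.00 under whitespace normalization
```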
rehg-lab/RAVE
annotator/oneformer/oneformer/demo/visualizer.py
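The context entries that follow include detectron2's `Boxes` and `BoxMode` structures from the repo's vendored copy. A minimal hedged usage sketch of the conversion they implement (XYWH absolute to XYXY absolute), assuming the vendored module is importable at this path:

```python
import torch
from annotator.oneformer.detectron2.structures.boxes import Boxes, BoxMode

# (x0, y0, w, h) -> (x0, y0, x1, y1): convert() adds w, h to the top-left corner.
xywh = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
xyxy = BoxMode.convert(xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
boxes = Boxes(xyxy)
print(xyxy.tolist())  # [[10.0, 20.0, 40.0, 60.0]]
print(boxes.area())   # tensor([1200.])
```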
[ { "identifier": "MetadataCatalog", "path": "annotator/oneformer/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\r\nclass Metadata(types.SimpleNamespace):\r\nclass _MetadataCatalog(UserDict):\r\n def register(self, name, func):\r\n def get(self, name):\r\n def list(self) -> List[str]:\r\n def remove(self, name):\r\n def __str__(self):\r\n def __getattr__(self, key):\r\n def __setattr__(self, key, val):\r\n def as_dict(self):\r\n def set(self, **kwargs):\r\n def get(self, key, default=None):\r\n def get(self, name):\r\n def list(self):\r\n def remove(self, name):\r\n def __str__(self):\r\n _RENAMED = {\r\n \"class_names\": \"thing_classes\",\r\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\r\n \"stuff_class_names\": \"stuff_classes\",\r\n }\r" }, { "identifier": "Boxes", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "class Boxes:\r\n \"\"\"\r\n This structure stores a list of boxes as a Nx4 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n\r\n Attributes:\r\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n if not isinstance(tensor, torch.Tensor):\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\r\n else:\r\n tensor = tensor.to(torch.float32)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"Boxes\":\r\n \"\"\"\r\n Clone the Boxes.\r\n\r\n Returns:\r\n Boxes\r\n \"\"\"\r\n return Boxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return Boxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\r\n return area\r\n\r\n def clip(self, box_size: Tuple[int, int]) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n \"\"\"\r\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\r\n h, w = box_size\r\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\r\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\r\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\r\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\r\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor:\r\n a binary vector which represents whether each box is empty\r\n (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2] - box[:, 0]\r\n 
heights = box[:, 3] - box[:, 1]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"Boxes\":\r\n \"\"\"\r\n Args:\r\n item: int, slice, or a BoolTensor\r\n\r\n Returns:\r\n Boxes: Create a new :class:`Boxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Boxes might share storage with this Boxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Boxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\r\n return Boxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"Boxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box.\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n inds_inside = (\r\n (self.tensor[..., 0] >= -boundary_threshold)\r\n & (self.tensor[..., 1] >= -boundary_threshold)\r\n & (self.tensor[..., 2] < width + boundary_threshold)\r\n & (self.tensor[..., 3] < height + boundary_threshold)\r\n )\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the box with horizontal and vertical scaling factors\r\n \"\"\"\r\n self.tensor[:, 0::2] *= scale_x\r\n self.tensor[:, 1::2] *= scale_y\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\r\n \"\"\"\r\n Concatenates a list of Boxes into a single Boxes\r\n\r\n Arguments:\r\n boxes_list (list[Boxes])\r\n\r\n Returns:\r\n Boxes: the concatenated Boxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, Boxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. 
layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\r\n # https://github.com/pytorch/pytorch/issues/18627\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (4,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r" }, { "identifier": "BoxMode", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\r\n \"\"\"\r\n Enum of different ways to represent a box.\r\n \"\"\"\r\n\r\n XYXY_ABS = 0\r\n \"\"\"\r\n (x0, y0, x1, y1) in absolute floating points coordinates.\r\n The coordinates in range [0, width or height].\r\n \"\"\"\r\n XYWH_ABS = 1\r\n \"\"\"\r\n (x0, y0, w, h) in absolute floating points coordinates.\r\n \"\"\"\r\n XYXY_REL = 2\r\n \"\"\"\r\n Not yet supported!\r\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\r\n \"\"\"\r\n XYWH_REL = 3\r\n \"\"\"\r\n Not yet supported!\r\n (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image.\r\n \"\"\"\r\n XYWHA_ABS = 4\r\n \"\"\"\r\n (xc, yc, w, h, a) in absolute floating points coordinates.\r\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\r\n \"\"\"\r\n\r\n @staticmethod\r\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\r\n \"\"\"\r\n Args:\r\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\r\n from_mode, to_mode (BoxMode)\r\n\r\n Returns:\r\n The converted box of the same type.\r\n \"\"\"\r\n if from_mode == to_mode:\r\n return box\r\n\r\n original_type = type(box)\r\n is_numpy = isinstance(box, np.ndarray)\r\n single_box = isinstance(box, (list, tuple))\r\n if single_box:\r\n assert len(box) == 4 or len(box) == 5, (\r\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\r\n \" where k == 4 or 5\"\r\n )\r\n arr = torch.tensor(box)[None, :]\r\n else:\r\n # avoid modifying the input box\r\n if is_numpy:\r\n arr = torch.from_numpy(np.asarray(box)).clone()\r\n else:\r\n arr = box.clone()\r\n\r\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\r\n BoxMode.XYXY_REL,\r\n BoxMode.XYWH_REL,\r\n ], \"Relative mode not yet supported!\"\r\n\r\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\r\n assert (\r\n arr.shape[-1] == 5\r\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\r\n original_dtype = arr.dtype\r\n arr = arr.double()\r\n\r\n w = arr[:, 2]\r\n h = arr[:, 3]\r\n a = arr[:, 4]\r\n c = torch.abs(torch.cos(a * math.pi / 180.0))\r\n s = torch.abs(torch.sin(a * math.pi / 180.0))\r\n # This basically computes the horizontal bounding rectangle of the rotated box\r\n new_w = c * w + s * h\r\n new_h = c * h + s * w\r\n\r\n # convert center to top-left corner\r\n arr[:, 0] -= new_w / 2.0\r\n arr[:, 1] -= new_h / 2.0\r\n # bottom-right corner\r\n arr[:, 2] = arr[:, 0] + new_w\r\n arr[:, 3] = arr[:, 1] + new_h\r\n\r\n arr = arr[:, :4].to(dtype=original_dtype)\r\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\r\n original_dtype = arr.dtype\r\n arr = arr.double()\r\n arr[:, 0] += arr[:, 2] / 2.0\r\n arr[:, 1] += arr[:, 3] / 2.0\r\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\r\n arr = torch.cat((arr, angles), 
axis=1).to(dtype=original_dtype)\r\n else:\r\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\r\n arr[:, 2] += arr[:, 0]\r\n arr[:, 3] += arr[:, 1]\r\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\r\n arr[:, 2] -= arr[:, 0]\r\n arr[:, 3] -= arr[:, 1]\r\n else:\r\n raise NotImplementedError(\r\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\r\n from_mode, to_mode\r\n )\r\n )\r\n\r\n if single_box:\r\n return original_type(arr.flatten().tolist())\r\n if is_numpy:\r\n return arr.numpy()\r\n else:\r\n return arr\r" }, { "identifier": "Keypoints", "path": "annotator/oneformer/detectron2/structures/keypoints.py", "snippet": "class Keypoints:\r\n \"\"\"\r\n Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property\r\n containing the x,y location and visibility flag of each keypoint. This tensor has shape\r\n (N, K, 3) where N is the number of instances and K is the number of keypoints per instance.\r\n\r\n The visibility flag follows the COCO format and must be one of three integers:\r\n\r\n * v=0: not labeled (in which case x=y=0)\r\n * v=1: labeled but not visible\r\n * v=2: labeled and visible\r\n \"\"\"\r\n\r\n def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):\r\n \"\"\"\r\n Arguments:\r\n keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.\r\n The shape should be (N, K, 3) where N is the number of\r\n instances, and K is the number of keypoints per instance.\r\n \"\"\"\r\n device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device(\"cpu\")\r\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)\r\n assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape\r\n self.tensor = keypoints\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.size(0)\r\n\r\n def to(self, *args: Any, **kwargs: Any) -> \"Keypoints\":\r\n return type(self)(self.tensor.to(*args, **kwargs))\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return self.tensor.device\r\n\r\n def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:\r\n \"\"\"\r\n Convert keypoint annotations to a heatmap of one-hot labels for training,\r\n as described in :paper:`Mask R-CNN`.\r\n\r\n Arguments:\r\n boxes: Nx4 tensor, the boxes to draw the keypoints to\r\n\r\n Returns:\r\n heatmaps:\r\n A tensor of shape (N, K), each element is integer spatial label\r\n in the range [0, heatmap_size**2 - 1] for each keypoint in the input.\r\n valid:\r\n A tensor of shape (N, K) containing whether each keypoint is in the roi or not.\r\n \"\"\"\r\n return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)\r\n\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Keypoints\":\r\n \"\"\"\r\n Create a new `Keypoints` by indexing on this `Keypoints`.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.\r\n 2. `new_kpts = kpts[2:10]`: return a slice of key points.\r\n 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor\r\n with `length = len(kpts)`. 
Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Keypoints might share storage with this Keypoints,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Keypoints([self.tensor[item]])\r\n return Keypoints(self.tensor[item])\r\n\r\n def __repr__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={})\".format(len(self.tensor))\r\n return s\r\n\r\n @staticmethod\r\n def cat(keypoints_list: List[\"Keypoints\"]) -> \"Keypoints\":\r\n \"\"\"\r\n Concatenates a list of Keypoints into a single Keypoints\r\n\r\n Arguments:\r\n keypoints_list (list[Keypoints])\r\n\r\n Returns:\r\n Keypoints: the concatenated Keypoints\r\n \"\"\"\r\n assert isinstance(keypoints_list, (list, tuple))\r\n assert len(keypoints_list) > 0\r\n assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list)\r\n\r\n cat_kpts = type(keypoints_list[0])(\r\n torch.cat([kpts.tensor for kpts in keypoints_list], dim=0)\r\n )\r\n return cat_kpts\r" }, { "identifier": "BitMasks", "path": "annotator/oneformer/detectron2/structures/masks.py", "snippet": "class BitMasks:\r\n \"\"\"\r\n This class stores the segmentation masks for all objects in one image, in\r\n the form of bitmaps.\r\n\r\n Attributes:\r\n tensor: bool Tensor of N,H,W, representing N instances in the image.\r\n \"\"\"\r\n\r\n def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):\r\n \"\"\"\r\n Args:\r\n tensor: bool Tensor of N,H,W, representing N instances in the image.\r\n \"\"\"\r\n if isinstance(tensor, torch.Tensor):\r\n tensor = tensor.to(torch.bool)\r\n else:\r\n tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device(\"cpu\"))\r\n assert tensor.dim() == 3, tensor.size()\r\n self.image_size = tensor.shape[1:]\r\n self.tensor = tensor\r\n\r\n @torch.jit.unused\r\n def to(self, *args: Any, **kwargs: Any) -> \"BitMasks\":\r\n return BitMasks(self.tensor.to(*args, **kwargs))\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return self.tensor.device\r\n\r\n @torch.jit.unused\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"BitMasks\":\r\n \"\"\"\r\n Returns:\r\n BitMasks: Create a new :class:`BitMasks` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.\r\n 2. `new_masks = masks[2:10]`: return a slice of masks.\r\n 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(masks)`. 
Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned object might share storage with this object,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return BitMasks(self.tensor[item].unsqueeze(0))\r\n m = self.tensor[item]\r\n assert m.dim() == 3, \"Indexing on BitMasks with {} returns a tensor with shape {}!\".format(\r\n item, m.shape\r\n )\r\n return BitMasks(m)\r\n\r\n @torch.jit.unused\r\n def __iter__(self) -> torch.Tensor:\r\n yield from self.tensor\r\n\r\n @torch.jit.unused\r\n def __repr__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={})\".format(len(self.tensor))\r\n return s\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def nonempty(self) -> torch.Tensor:\r\n \"\"\"\r\n Find masks that are non-empty.\r\n\r\n Returns:\r\n Tensor: a BoolTensor which represents\r\n whether each mask is empty (False) or non-empty (True).\r\n \"\"\"\r\n return self.tensor.flatten(1).any(dim=1)\r\n\r\n @staticmethod\r\n def from_polygon_masks(\r\n polygon_masks: Union[\"PolygonMasks\", List[List[np.ndarray]]], height: int, width: int\r\n ) -> \"BitMasks\":\r\n \"\"\"\r\n Args:\r\n polygon_masks (list[list[ndarray]] or PolygonMasks)\r\n height, width (int)\r\n \"\"\"\r\n if isinstance(polygon_masks, PolygonMasks):\r\n polygon_masks = polygon_masks.polygons\r\n masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]\r\n if len(masks):\r\n return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))\r\n else:\r\n return BitMasks(torch.empty(0, height, width, dtype=torch.bool))\r\n\r\n @staticmethod\r\n def from_roi_masks(roi_masks: \"ROIMasks\", height: int, width: int) -> \"BitMasks\":\r\n \"\"\"\r\n Args:\r\n roi_masks:\r\n height, width (int):\r\n \"\"\"\r\n return roi_masks.to_bitmasks(height, width)\r\n\r\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\r\n \"\"\"\r\n Crop each bitmask by the given box, and resize results to (mask_size, mask_size).\r\n This can be used to prepare training targets for Mask R-CNN.\r\n It has less reconstruction error compared to rasterization with polygons.\r\n However we observe no difference in accuracy,\r\n but BitMasks requires more memory to store all the masks.\r\n\r\n Args:\r\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\r\n mask_size (int): the size of the rasterized mask.\r\n\r\n Returns:\r\n Tensor:\r\n A bool tensor of shape (N, mask_size, mask_size), where\r\n N is the number of predicted boxes for this image.\r\n \"\"\"\r\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\r\n device = self.tensor.device\r\n\r\n batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]\r\n rois = torch.cat([batch_inds, boxes], dim=1) # Nx5\r\n\r\n bit_masks = self.tensor.to(dtype=torch.float32)\r\n rois = rois.to(device=device)\r\n output = (\r\n ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)\r\n .forward(bit_masks[:, None, :, :], rois)\r\n .squeeze(1)\r\n )\r\n output = output >= 0.5\r\n return output\r\n\r\n def get_bounding_boxes(self) -> Boxes:\r\n \"\"\"\r\n Returns:\r\n Boxes: tight bounding boxes around bitmasks.\r\n If a mask is empty, it's bounding box will be all zero.\r\n \"\"\"\r\n boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)\r\n x_any = torch.any(self.tensor, dim=1)\r\n y_any = torch.any(self.tensor, dim=2)\r\n for idx in range(self.tensor.shape[0]):\r\n x = torch.where(x_any[idx, 
:])[0]\r\n y = torch.where(y_any[idx, :])[0]\r\n if len(x) > 0 and len(y) > 0:\r\n boxes[idx, :] = torch.as_tensor(\r\n [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32\r\n )\r\n return Boxes(boxes)\r\n\r\n @staticmethod\r\n def cat(bitmasks_list: List[\"BitMasks\"]) -> \"BitMasks\":\r\n \"\"\"\r\n Concatenates a list of BitMasks into a single BitMasks\r\n\r\n Arguments:\r\n bitmasks_list (list[BitMasks])\r\n\r\n Returns:\r\n BitMasks: the concatenated BitMasks\r\n \"\"\"\r\n assert isinstance(bitmasks_list, (list, tuple))\r\n assert len(bitmasks_list) > 0\r\n assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)\r\n\r\n cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))\r\n return cat_bitmasks\r" }, { "identifier": "PolygonMasks", "path": "annotator/oneformer/detectron2/structures/masks.py", "snippet": "class PolygonMasks:\r\n \"\"\"\r\n This class stores the segmentation masks for all objects in one image, in the form of polygons.\r\n\r\n Attributes:\r\n polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.\r\n \"\"\"\r\n\r\n def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):\r\n \"\"\"\r\n Arguments:\r\n polygons (list[list[np.ndarray]]): The first\r\n level of the list correspond to individual instances,\r\n the second level to all the polygons that compose the\r\n instance, and the third level to the polygon coordinates.\r\n The third level array should have the format of\r\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\r\n \"\"\"\r\n if not isinstance(polygons, list):\r\n raise ValueError(\r\n \"Cannot create PolygonMasks: Expect a list of list of polygons per image. \"\r\n \"Got '{}' instead.\".format(type(polygons))\r\n )\r\n\r\n def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\r\n # Use float64 for higher precision, because why not?\r\n # Always put polygons on CPU (self.to is a no-op) since they\r\n # are supposed to be small tensors.\r\n # May need to change this assumption if GPU placement becomes useful\r\n if isinstance(t, torch.Tensor):\r\n t = t.cpu().numpy()\r\n return np.asarray(t).astype(\"float64\")\r\n\r\n def process_polygons(\r\n polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]\r\n ) -> List[np.ndarray]:\r\n if not isinstance(polygons_per_instance, list):\r\n raise ValueError(\r\n \"Cannot create polygons: Expect a list of polygons per instance. 
\"\r\n \"Got '{}' instead.\".format(type(polygons_per_instance))\r\n )\r\n # transform each polygon to a numpy array\r\n polygons_per_instance = [_make_array(p) for p in polygons_per_instance]\r\n for polygon in polygons_per_instance:\r\n if len(polygon) % 2 != 0 or len(polygon) < 6:\r\n raise ValueError(f\"Cannot create a polygon from {len(polygon)} coordinates.\")\r\n return polygons_per_instance\r\n\r\n self.polygons: List[List[np.ndarray]] = [\r\n process_polygons(polygons_per_instance) for polygons_per_instance in polygons\r\n ]\r\n\r\n def to(self, *args: Any, **kwargs: Any) -> \"PolygonMasks\":\r\n return self\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return torch.device(\"cpu\")\r\n\r\n def get_bounding_boxes(self) -> Boxes:\r\n \"\"\"\r\n Returns:\r\n Boxes: tight bounding boxes around polygon masks.\r\n \"\"\"\r\n boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)\r\n for idx, polygons_per_instance in enumerate(self.polygons):\r\n minxy = torch.as_tensor([float(\"inf\"), float(\"inf\")], dtype=torch.float32)\r\n maxxy = torch.zeros(2, dtype=torch.float32)\r\n for polygon in polygons_per_instance:\r\n coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)\r\n minxy = torch.min(minxy, torch.min(coords, dim=0).values)\r\n maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)\r\n boxes[idx, :2] = minxy\r\n boxes[idx, 2:] = maxxy\r\n return Boxes(boxes)\r\n\r\n def nonempty(self) -> torch.Tensor:\r\n \"\"\"\r\n Find masks that are non-empty.\r\n\r\n Returns:\r\n Tensor:\r\n a BoolTensor which represents whether each mask is empty (False) or not (True).\r\n \"\"\"\r\n keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]\r\n return torch.from_numpy(np.asarray(keep, dtype=bool))\r\n\r\n def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\r\n \"\"\"\r\n Support indexing over the instances and return a `PolygonMasks` object.\r\n `item` can be:\r\n\r\n 1. An integer. It will return an object with only one instance.\r\n 2. A slice. It will return an object with the selected instances.\r\n 3. A list[int]. It will return an object with the selected instances,\r\n correpsonding to the indices in the list.\r\n 4. 
A vector mask of type BoolTensor, whose length is num_instances.\r\n It will return an object with the instances whose mask is nonzero.\r\n \"\"\"\r\n if isinstance(item, int):\r\n selected_polygons = [self.polygons[item]]\r\n elif isinstance(item, slice):\r\n selected_polygons = self.polygons[item]\r\n elif isinstance(item, list):\r\n selected_polygons = [self.polygons[i] for i in item]\r\n elif isinstance(item, torch.Tensor):\r\n # Polygons is a list, so we have to move the indices back to CPU.\r\n if item.dtype == torch.bool:\r\n assert item.dim() == 1, item.shape\r\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\r\n elif item.dtype in [torch.int32, torch.int64]:\r\n item = item.cpu().numpy().tolist()\r\n else:\r\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\r\n selected_polygons = [self.polygons[i] for i in item]\r\n return PolygonMasks(selected_polygons)\r\n\r\n def __iter__(self) -> Iterator[List[np.ndarray]]:\r\n \"\"\"\r\n Yields:\r\n list[ndarray]: the polygons for one instance.\r\n Each Tensor is a float64 vector representing a polygon.\r\n \"\"\"\r\n return iter(self.polygons)\r\n\r\n def __repr__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={})\".format(len(self.polygons))\r\n return s\r\n\r\n def __len__(self) -> int:\r\n return len(self.polygons)\r\n\r\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\r\n \"\"\"\r\n Crop each mask by the given box, and resize results to (mask_size, mask_size).\r\n This can be used to prepare training targets for Mask R-CNN.\r\n\r\n Args:\r\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\r\n mask_size (int): the size of the rasterized mask.\r\n\r\n Returns:\r\n Tensor: A bool tensor of shape (N, mask_size, mask_size), where\r\n N is the number of predicted boxes for this image.\r\n \"\"\"\r\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\r\n\r\n device = boxes.device\r\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\r\n # (several small tensors for representing a single instance mask)\r\n boxes = boxes.to(torch.device(\"cpu\"))\r\n\r\n results = [\r\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\r\n for poly, box in zip(self.polygons, boxes)\r\n ]\r\n \"\"\"\r\n poly: list[list[float]], the polygons for one instance\r\n box: a tensor of shape (4,)\r\n \"\"\"\r\n if len(results) == 0:\r\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\r\n return torch.stack(results, dim=0).to(device=device)\r\n\r\n def area(self):\r\n \"\"\"\r\n Computes area of the mask.\r\n Only works with Polygons, using the shoelace formula:\r\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\r\n\r\n Returns:\r\n Tensor: a vector, area for each instance\r\n \"\"\"\r\n\r\n area = []\r\n for polygons_per_instance in self.polygons:\r\n area_per_instance = 0\r\n for p in polygons_per_instance:\r\n area_per_instance += polygon_area(p[0::2], p[1::2])\r\n area.append(area_per_instance)\r\n\r\n return torch.tensor(area)\r\n\r\n @staticmethod\r\n def cat(polymasks_list: List[\"PolygonMasks\"]) -> \"PolygonMasks\":\r\n \"\"\"\r\n Concatenates a list of PolygonMasks into a single PolygonMasks\r\n\r\n Arguments:\r\n polymasks_list (list[PolygonMasks])\r\n\r\n Returns:\r\n PolygonMasks: the concatenated PolygonMasks\r\n \"\"\"\r\n assert isinstance(polymasks_list, (list, tuple))\r\n assert len(polymasks_list) > 
0\r\n assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)\r\n\r\n cat_polymasks = type(polymasks_list[0])(\r\n list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))\r\n )\r\n return cat_polymasks\r" }, { "identifier": "RotatedBoxes", "path": "annotator/oneformer/detectron2/structures/rotated_boxes.py", "snippet": "class RotatedBoxes(Boxes):\r\n \"\"\"\r\n This structure stores a list of rotated boxes as a Nx5 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx5 matrix. Each row is\r\n (x_center, y_center, width, height, angle),\r\n in which angle is represented in degrees.\r\n While there's no strict range restriction for it,\r\n the recommended principal range is between [-180, 180) degrees.\r\n\r\n Assume we have a horizontal box B = (x_center, y_center, width, height),\r\n where width is along the x-axis and height is along the y-axis.\r\n The rotated box B_rot (x_center, y_center, width, height, angle)\r\n can be seen as:\r\n\r\n 1. When angle == 0:\r\n B_rot == B\r\n 2. When angle > 0:\r\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;\r\n 3. When angle < 0:\r\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.\r\n\r\n Mathematically, since the right-handed coordinate system for image space\r\n is (y, x), where y is top->down and x is left->right, the 4 vertices of the\r\n rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from\r\n the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)\r\n in the following way (:math:`\\\\theta = angle*\\\\pi/180` is the angle in radians,\r\n :math:`(y_c, x_c)` is the center of the rectangle):\r\n\r\n .. math::\r\n\r\n yr_i = \\\\cos(\\\\theta) (y_i - y_c) - \\\\sin(\\\\theta) (x_i - x_c) + y_c,\r\n\r\n xr_i = \\\\sin(\\\\theta) (y_i - y_c) + \\\\cos(\\\\theta) (x_i - x_c) + x_c,\r\n\r\n which is the standard rigid-body rotation transformation.\r\n\r\n Intuitively, the angle is\r\n (1) the rotation angle from y-axis in image space\r\n to the height vector (top->down in the box's local coordinate system)\r\n of the box in CCW, and\r\n (2) the rotation angle from x-axis in image space\r\n to the width vector (left->right in the box's local coordinate system)\r\n of the box in CCW.\r\n\r\n More intuitively, consider the following horizontal box ABCD represented\r\n in (x1, y1, x2, y2): (3, 2, 7, 4),\r\n covering the [3, 7] x [2, 4] region of the continuous coordinate system\r\n which looks like this:\r\n\r\n .. code:: none\r\n\r\n O--------> x\r\n |\r\n | A---B\r\n | | |\r\n | D---C\r\n |\r\n v y\r\n\r\n Note that each capital letter represents one 0-dimensional geometric point\r\n instead of a 'square pixel' here.\r\n\r\n In the example above, using (x, y) to represent a point we have:\r\n\r\n .. math::\r\n\r\n O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)\r\n\r\n We name vector AB = vector DC as the width vector in box's local coordinate system, and\r\n vector AD = vector BC as the height vector in box's local coordinate system. 
Initially,\r\n when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis\r\n in the image space, respectively.\r\n\r\n For better illustration, we denote the center of the box as E,\r\n\r\n .. code:: none\r\n\r\n O--------> x\r\n |\r\n | A---B\r\n | | E |\r\n | D---C\r\n |\r\n v y\r\n\r\n where the center E = ((3+7)/2, (2+4)/2) = (5, 3).\r\n\r\n Also,\r\n\r\n .. math::\r\n\r\n width = |AB| = |CD| = 7 - 3 = 4,\r\n height = |AD| = |BC| = 4 - 2 = 2.\r\n\r\n Therefore, the corresponding representation for the same shape in rotated box in\r\n (x_center, y_center, width, height, angle) format is:\r\n\r\n (5, 3, 4, 2, 0),\r\n\r\n Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees\r\n CCW (counter-clockwise) by definition. It looks like this:\r\n\r\n .. code:: none\r\n\r\n O--------> x\r\n | B-C\r\n | | |\r\n | |E|\r\n | | |\r\n | A-D\r\n v y\r\n\r\n The center E is still located at the same point (5, 3), while the vertices\r\n ABCD are rotated by 90 degrees CCW with regard to E:\r\n A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)\r\n\r\n Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to\r\n vector AD or vector BC (the top->down height vector in box's local coordinate system),\r\n or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right\r\n width vector in box's local coordinate system).\r\n\r\n .. math::\r\n\r\n width = |AB| = |CD| = 5 - 1 = 4,\r\n height = |AD| = |BC| = 6 - 4 = 2.\r\n\r\n Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)\r\n by definition? It looks like this:\r\n\r\n .. code:: none\r\n\r\n O--------> x\r\n | D-A\r\n | | |\r\n | |E|\r\n | | |\r\n | C-B\r\n v y\r\n\r\n The center E is still located at the same point (5, 3), while the vertices\r\n ABCD are rotated by 90 degrees CW with regard to E:\r\n A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)\r\n\r\n .. math::\r\n\r\n width = |AB| = |CD| = 5 - 1 = 4,\r\n height = |AD| = |BC| = 6 - 4 = 2.\r\n\r\n This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU\r\n will be 1. However, these two will generate different RoI Pooling results and\r\n should not be treated as an identical box.\r\n\r\n On the other hand, it's easy to see that (X, Y, W, H, A) is identical to\r\n (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be\r\n identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is\r\n equivalent to rotating the same shape 90 degrees CW.\r\n\r\n We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):\r\n\r\n .. code:: none\r\n\r\n O--------> x\r\n |\r\n | C---D\r\n | | E |\r\n | B---A\r\n |\r\n v y\r\n\r\n .. math::\r\n\r\n A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),\r\n\r\n width = |AB| = |CD| = 7 - 3 = 4,\r\n height = |AD| = |BC| = 4 - 2 = 2.\r\n\r\n Finally, this is a very inaccurate (heavily quantized) illustration of\r\n how (5, 3, 4, 2, 60) looks like in case anyone wonders:\r\n\r\n .. 
code:: none\r\n\r\n O--------> x\r\n | B\\\r\n | / C\r\n | /E /\r\n | A /\r\n | `D\r\n v y\r\n\r\n It's still a rectangle with center of (5, 3), width of 4 and height of 2,\r\n but its angle (and thus orientation) is somewhere between\r\n (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).\r\n \"\"\"\r\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"RotatedBoxes\":\r\n \"\"\"\r\n Clone the RotatedBoxes.\r\n\r\n Returns:\r\n RotatedBoxes\r\n \"\"\"\r\n return RotatedBoxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return RotatedBoxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = box[:, 2] * box[:, 3]\r\n return area\r\n\r\n # Avoid in-place operations so that we can torchscript; NOTE: this creates a new tensor\r\n def normalize_angles(self) -> None:\r\n \"\"\"\r\n Restrict angles to the range of [-180, 180) degrees\r\n \"\"\"\r\n angle_tensor = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0\r\n self.tensor = torch.cat((self.tensor[:, :4], angle_tensor[:, None]), dim=1)\r\n\r\n def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n For RRPN:\r\n Only clip boxes that are almost horizontal with a tolerance of\r\n clip_angle_threshold to maintain backward compatibility.\r\n\r\n Rotated boxes beyond this threshold are not clipped for two reasons:\r\n\r\n 1. There are potentially multiple ways to clip a rotated box to make it\r\n fit within the image.\r\n 2. It's tricky to make the entire rectangular box fit within the image\r\n and still be able to not leave out pixels of interest.\r\n\r\n Therefore we rely on ops like RoIAlignRotated to safely handle this.\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n clip_angle_threshold:\r\n Iff. 
abs(normalized(angle)) <= clip_angle_threshold (in degrees),\r\n we do the clipping as horizontal boxes.\r\n \"\"\"\r\n h, w = box_size\r\n\r\n # normalize angles to be within (-180, 180] degrees\r\n self.normalize_angles()\r\n\r\n idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]\r\n\r\n # convert to (x1, y1, x2, y2)\r\n x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0\r\n y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0\r\n x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0\r\n y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0\r\n\r\n # clip\r\n x1.clamp_(min=0, max=w)\r\n y1.clamp_(min=0, max=h)\r\n x2.clamp_(min=0, max=w)\r\n y2.clamp_(min=0, max=h)\r\n\r\n # convert back to (xc, yc, w, h)\r\n self.tensor[idx, 0] = (x1 + x2) / 2.0\r\n self.tensor[idx, 1] = (y1 + y2) / 2.0\r\n # make sure widths and heights do not increase due to numerical errors\r\n self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)\r\n self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor: a binary vector which represents\r\n whether each box is empty (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2]\r\n heights = box[:, 3]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"RotatedBoxes\":\r\n \"\"\"\r\n Returns:\r\n RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor\r\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned RotatedBoxes might share storage with this RotatedBoxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return RotatedBoxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on RotatedBoxes with {} failed to return a matrix!\".format(\r\n item\r\n )\r\n return RotatedBoxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"RotatedBoxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box covering\r\n [0, width] x [0, height]\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n For RRPN, it might not be necessary to call this function since it's common\r\n for rotated box to extend to outside of the image boundaries\r\n (the clip function only clips the near-horizontal boxes)\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n\r\n cnt_x = self.tensor[..., 0]\r\n cnt_y = self.tensor[..., 1]\r\n half_w = self.tensor[..., 2] / 2.0\r\n half_h = self.tensor[..., 3] / 2.0\r\n a = self.tensor[..., 4]\r\n c = torch.abs(torch.cos(a * math.pi / 180.0))\r\n s = torch.abs(torch.sin(a * math.pi / 180.0))\r\n # This basically computes the horizontal bounding rectangle of the rotated box\r\n max_rect_dx = c * half_w + s * half_h\r\n max_rect_dy = c * half_h + s * half_w\r\n\r\n inds_inside = (\r\n (cnt_x - max_rect_dx >= -boundary_threshold)\r\n & (cnt_y - max_rect_dy >= -boundary_threshold)\r\n & (cnt_x + max_rect_dx < width + boundary_threshold)\r\n & (cnt_y + max_rect_dy < height + boundary_threshold)\r\n )\r\n\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return self.tensor[:, :2]\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the rotated box with horizontal and vertical scaling factors\r\n Note: when scale_factor_x != scale_factor_y,\r\n the rotated box does not preserve the rectangular shape when the angle\r\n is not a multiple of 90 degrees under resize transformation.\r\n Instead, the shape is a parallelogram (that has skew)\r\n Here we make an approximation by fitting a rotated rectangle to the parallelogram.\r\n \"\"\"\r\n self.tensor[:, 0] *= scale_x\r\n self.tensor[:, 1] *= scale_y\r\n theta = self.tensor[:, 4] * math.pi / 180.0\r\n c = torch.cos(theta)\r\n s = torch.sin(theta)\r\n\r\n # In image space, y is top->down and x is left->right\r\n # Consider the local coordintate system for the rotated box,\r\n # where the box center is located at (0, 0), and the four vertices ABCD are\r\n # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)\r\n # the midpoint of the left edge AD of the rotated box E is:\r\n # E = (A+D)/2 = (-w / 2, 0)\r\n # the midpoint of the top edge AB of the rotated box F is:\r\n # F(0, -h / 2)\r\n # To get the old coordinates in the global system, apply the rotation transformation\r\n # (Note: the right-handed coordinate system for image space is yOx):\r\n # (old_x, old_y) = (s * y + c * x, c * y - s * x)\r\n # E(old) = (s * 0 + c * (-w/2), c * 0 
- s * (-w/2)) = (-c * w / 2, s * w / 2)\r\n # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)\r\n # After applying the scaling factor (sfx, sfy):\r\n # E(new) = (-sfx * c * w / 2, sfy * s * w / 2)\r\n # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)\r\n # The new width after scaling tranformation becomes:\r\n\r\n # w(new) = |E(new) - O| * 2\r\n # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2\r\n # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w\r\n # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]\r\n #\r\n # For example,\r\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;\r\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y\r\n self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)\r\n\r\n # h(new) = |F(new) - O| * 2\r\n # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2\r\n # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h\r\n # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]\r\n #\r\n # For example,\r\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;\r\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x\r\n self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)\r\n\r\n # The angle is the rotation angle from y-axis in image space to the height\r\n # vector (top->down in the box's local coordinate system) of the box in CCW.\r\n #\r\n # angle(new) = angle_yOx(O - F(new))\r\n # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )\r\n # = atan2(sfx * s * h / 2, sfy * c * h / 2)\r\n # = atan2(sfx * s, sfy * c)\r\n #\r\n # For example,\r\n # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)\r\n self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"RotatedBoxes\"]) -> \"RotatedBoxes\":\r\n \"\"\"\r\n Concatenates a list of RotatedBoxes into a single RotatedBoxes\r\n\r\n Arguments:\r\n boxes_list (list[RotatedBoxes])\r\n\r\n Returns:\r\n RotatedBoxes: the concatenated RotatedBoxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, RotatedBoxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. 
layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> torch.device:\r\n return self.tensor.device\r\n\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (5,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r" }, { "identifier": "PathManager", "path": "annotator/oneformer/detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\r\n PREFIX = \"detectron2://\"\r\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\r\n def _get_supported_prefixes(self):\r\n def _get_local_path(self, path, **kwargs):\r\n def _open(self, path, mode=\"r\", **kwargs):\r" }, { "identifier": "random_color", "path": "annotator/oneformer/oneformer/demo/colormap.py", "snippet": "def random_color(rgb=False, maximum=255):\r\n \"\"\"\r\n Args:\r\n rgb (bool): whether to return RGB colors or BGR colors.\r\n maximum (int): either 255 or 1\r\n Returns:\r\n ndarray: a vector of 3 numbers\r\n \"\"\"\r\n idx = np.random.randint(0, len(_COLORS))\r\n ret = _COLORS[idx] * maximum\r\n if not rgb:\r\n ret = ret[::-1]\r\n return ret\r" }, { "identifier": "_COLORS", "path": "annotator/oneformer/oneformer/demo/colormap.py", "snippet": "_COLORS = []\r" } ]
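The `BitMasks.get_bounding_boxes` snippet in the context above derives tight boxes from two `torch.any` reductions followed by first/last nonzero indices. A minimal standalone sketch of the same reduction, in plain PyTorch with no detectron2 dependency (the function name is mine):

import torch

def tight_boxes_from_bitmasks(masks: torch.Tensor) -> torch.Tensor:
    """Tight (x0, y0, x1, y1) boxes for an (N, H, W) bool mask tensor.

    Mirrors BitMasks.get_bounding_boxes: reduce with torch.any over rows
    and columns, then take the first/last nonzero index per instance.
    An empty mask yields an all-zero box.
    """
    boxes = torch.zeros(masks.shape[0], 4, dtype=torch.float32)
    x_any = masks.any(dim=1)  # (N, W): which columns contain foreground
    y_any = masks.any(dim=2)  # (N, H): which rows contain foreground
    for i in range(masks.shape[0]):
        x = torch.where(x_any[i])[0]
        y = torch.where(y_any[i])[0]
        if len(x) > 0 and len(y) > 0:
            # +1 so the box spans past the last foreground pixel
            boxes[i] = torch.as_tensor(
                [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
            )
    return boxes

# Toy check: a 3x3 square with top-left corner at (x=2, y=4).
m = torch.zeros(1, 10, 10, dtype=torch.bool)
m[0, 4:7, 2:5] = True
print(tight_boxes_from_bitmasks(m))  # tensor([[2., 4., 5., 7.]])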
import colorsys
import logging
import math
import numpy as np
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import annotator.oneformer.pycocotools.mask as mask_util
import torch
import random
from enum import Enum, unique
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from annotator.oneformer.detectron2.data import MetadataCatalog
from annotator.oneformer.detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
from annotator.oneformer.detectron2.utils.file_io import PathManager
from .colormap import random_color, _COLORS
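The colormap helpers imported above (`random_color`, `_COLORS`) back the `instance_color` function defined in `all_code` below, which picks colors by indexing a fixed table instead of sampling, so the same instance index is always rendered the same way. A sketch of that scheme with a three-entry stand-in palette (the real `_COLORS` table is much larger, and the modulo wrap-around is my addition):

import numpy as np

_PALETTE = np.array([
    [0.000, 0.447, 0.741],
    [0.850, 0.325, 0.098],
    [0.929, 0.694, 0.125],
], dtype=np.float32)

def stable_instance_color(idx: int, rgb: bool = True, maximum: int = 255) -> np.ndarray:
    """Deterministic color for instance `idx`: index a fixed palette
    rather than drawing randomly, mirroring instance_color() below."""
    ret = _PALETTE[idx % len(_PALETTE)] * maximum
    if not rgb:
        ret = ret[::-1]  # matplotlib expects RGB, OpenCV expects BGR
    return ret

print(stable_instance_color(1, maximum=1))  # [0.85  0.325 0.098]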
17322
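`GenericMask` in the `all_code` field below converts between polygon and mask representations through COCO run-length encoding: `frPyObjects` rasterizes polygons to RLEs, `merge` unions them, `decode` yields the binary mask, and `toBbox` reads off an XYWH box. A self-contained round-trip sketch, assuming the standard `pycocotools` package in place of the vendored `annotator.oneformer.pycocotools` import used above:

import numpy as np
import pycocotools.mask as mask_util

# One triangle in the flat [x0, y0, x1, y1, ...] format GenericMask expects.
poly = [np.array([1.0, 1.0, 8.0, 1.0, 4.0, 6.0])]
h, w = 10, 10

# polygons -> per-polygon RLEs -> single merged RLE -> binary mask,
# the same pipeline as GenericMask.polygons_to_mask.
rles = mask_util.frPyObjects([p.tolist() for p in poly], h, w)
rle = mask_util.merge(rles)
mask = mask_util.decode(rle)          # (H, W) uint8 array in {0, 1}
x, y, bw, bh = mask_util.toBbox(rle)  # XYWH, as used by GenericMask.bbox()
print(int(mask.sum()), (x, y, bw, bh))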
self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") # self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. 
A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """
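The `_change_color_brightness` helper closing `cropped_code` drives the lighter label colors and darker polygon edges used throughout `all_code`: it scales the HLS lightness channel by (1 + factor) and clamps the result to [0, 1]. A standalone sketch of the same trick (the function name is mine):

import colorsys
import matplotlib.colors as mplc

def change_brightness(color, factor: float):
    """Scale HLS lightness by (1 + factor), clamp, and convert back to RGB.
    factor < 0 darkens, factor > 0 lightens, 0 leaves the color unchanged."""
    assert -1.0 <= factor <= 1.0
    r, g, b = mplc.to_rgb(color)
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    l = min(max(l + factor * l, 0.0), 1.0)
    return colorsys.hls_to_rgb(h, l, s)

print(change_brightness("g", 0.7))               # lighter green, e.g. for label text
print(change_brightness((0.9, 0.2, 0.2), -0.7))  # darker red, e.g. for polygon edges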
# Copyright (c) Facebook, Inc. and its affiliates. random.seed(0) logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 1.0) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 def instance_color(rgb=False, idx=1, maximum=255): """ Args: rgb (bool): whether to return RGB colors or BGR colors. maximum (int): either 255 or 1 Returns: ndarray: a vector of 3 numbers """ ret = _COLORS[idx] * maximum if not rgb: ret = ret[::-1] return ret @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. 
A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids corresponds to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(np.bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. 
continue yield (self._seg == sid).numpy().astype(np.bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(np.bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. 
when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, is_img=True, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ if is_img: self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) else: self.img = np.zeros_like(img_rgb).clip(0, 255).astype(np.uint8) + 255 if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def get_image(self, img): img = np.asarray(img).clip(0, 255).astype(np.uint8) return VisImage(img, scale=1.0) def draw_box_predictions( self, boxes=None, labels=None, scores=None, assigned_colors=None ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = 0 boxes = self._convert_boxes(boxes) classes = labels.tolist() scores = scores.tolist() labels = _create_text_labels(classes, scores, self.metadata.get("stuff_classes", None)) num_instances = len(boxes) assert len(labels) == num_instances if assigned_colors is None: # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. areas = None areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. 
boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) return self.output def draw_instance_predictions(self, predictions, alpha=0.8, is_text=True): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("stuff_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("stuff_colors"): # colors = [ # self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes # ] colors = [ instance_color(rgb=True, idx=c, maximum=1) for c in classes ] else: colors = None if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, is_text=is_text, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8, is_text=True, edge_color=_OFF_WHITE): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. 
""" if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=edge_color, text=text, alpha=alpha, area_threshold=area_threshold, is_text=is_text, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7, is_text=True,): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, is_text=is_text, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.stuff_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.stuff_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha, is_text=is_text) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentaions in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("stuff_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.stuff_colors[c]]) for c in category_ids ] names = self.metadata.get("stuff_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) # if pan_seg is None and "pan_seg_file_name" in dic: # with PathManager.open(dic["pan_seg_file_name"], "rb") as f: # pan_seg = Image.open(f) # pan_seg = np.asarray(pan_seg) # from panopticapi.utils import rgb2id # # pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, is_text=True, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None: # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] if num_instances == 0: return self.output if boxes is not None and boxes.shape[1] == 5: return self.overlay_rotated_instances( boxes=boxes, labels=labels, assigned_colors=assigned_colors ) # Display in largest to smallest order to reduce occlusion. areas = None if boxes is not None: areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) elif masks is not None: areas = np.asarray([x.area() for x in masks]) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] keypoints = keypoints[sorted_idxs] if keypoints is not None else None for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if masks is not None: for segment in masks[i].polygons: self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" elif masks is not None: # skip small mask without polygon if len(masks[i].polygons) == 0: continue x0, y0, x1, y1 = masks[i].bbox() # draw text in the center (defined by median) when box is not drawn # median is less sensitive to outliers. text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] horiz_align = "center" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) if is_text: self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) # draw keypoints if keypoints is not None: for keypoints_per_instance in keypoints: self.draw_and_connect_keypoints(keypoints_per_instance) return self.output def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): """ Args: boxes (ndarray): an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image. labels (list[str]): the text to be displayed for each instance. 
assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = len(boxes) if assigned_colors is None: # assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] assigned_colors = [instance_color(rgb=True, idx=i, maximum=1) for i in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. if boxes is not None: areas = boxes[:, 2] * boxes[:, 3] sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] labels = [labels[k] for k in sorted_idxs] if labels is not None else None colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): self.draw_rotated_box_with_label( boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None ) return self.output def draw_and_connect_keypoints(self, keypoints): """ Draws keypoints of an instance and follows the rules for keypoint connections to draw lines between appropriate keypoints. This follows color heuristics for line color. Args: keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints and the last dimension corresponds to (x, y, probability). Returns: output (VisImage): image object with visualizations. """ visible = {} keypoint_names = self.metadata.get("keypoint_names") for idx, keypoint in enumerate(keypoints): # draw keypoint x, y, prob = keypoint if prob > self.keypoint_threshold: self.draw_circle((x, y), color=_RED) if keypoint_names: keypoint_name = keypoint_names[idx] visible[keypoint_name] = (x, y) if self.metadata.get("keypoint_connection_rules"): for kp0, kp1, color in self.metadata.keypoint_connection_rules: if kp0 in visible and kp1 in visible: x0, y0 = visible[kp0] x1, y1 = visible[kp1] color = tuple(x / 255.0 for x in color) self.draw_line([x0, x1], [y0, y1], color=color) # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip # Note that this strategy is specific to person keypoints. # For other keypoints, it should just do nothing try: ls_x, ls_y = visible["left_shoulder"] rs_x, rs_y = visible["right_shoulder"] mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 except KeyError: pass else: # draw line from nose to mid-shoulder nose_x, nose_y = visible.get("nose", (None, None)) if nose_x is not None: self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) try: # draw line from mid-shoulder to mid-hip lh_x, lh_y = visible["left_hip"] rh_x, rh_y = visible["right_hip"] except KeyError: pass else: mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) return self.output """ Primitive drawing functions: """ def draw_text( self, text, position, *, font_size=None, color="g", horizontal_alignment="center", rotation=0, ): """ Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. 
            horizontal_alignment (str): see `matplotlib.text.Text`
            rotation: rotation angle in degrees CCW

        Returns:
            output (VisImage): image object with text drawn.
        """
        if not font_size:
            font_size = self._default_font_size

        # since the text background is dark, we don't want the text to be dark
        color = np.maximum(list(mplc.to_rgb(color)), 0.2)
        color[np.argmax(color)] = max(0.8, np.max(color))

        x, y = position
        self.output.ax.text(
            x,
            y,
            text,
            size=font_size * self.output.scale,
            family="sans-serif",
            bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
            verticalalignment="top",
            horizontalalignment=horizontal_alignment,
            color=color,
            zorder=10,
            rotation=rotation,
        )
        return self.output

    def draw_box(self, box_coord, alpha=1.0, edge_color="g", line_style="-"):
        """
        Args:
            box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
                are the coordinates of the box's top left corner. x1 and y1 are the
                coordinates of the box's bottom right corner.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.

        Returns:
            output (VisImage): image object with box drawn.
        """
        x0, y0, x1, y1 = box_coord
        width = x1 - x0
        height = y1 - y0

        linewidth = 2

        self.output.ax.add_patch(
            mpl.patches.Rectangle(
                (x0, y0),
                width,
                height,
                fill=False,
                edgecolor=edge_color,
                linewidth=linewidth * self.output.scale,
                alpha=alpha,
                linestyle=line_style,
            )
        )
        return self.output

    def draw_rotated_box_with_label(
        self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
    ):
        """
        Draw a rotated box with label on its top-left corner.

        Args:
            rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
                where cnt_x and cnt_y are the center coordinates of the box.
                w and h are the width and height of the box. angle represents how
                many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
            edge_color: color of the outline of the box. Refer to `matplotlib.colors`
                for full list of formats that are accepted.
            line_style (string): the string to use to create the outline of the boxes.
            label (string): label for rotated box. It will not be rendered when set to None.

        Returns:
            output (VisImage): image object with box drawn.
""" cnt_x, cnt_y, w, h, angle = rotated_box area = w * h # use thinner lines when the box is small linewidth = self._default_font_size / ( 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 ) theta = angle * math.pi / 180.0 c = math.cos(theta) s = math.sin(theta) rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] # x: left->right ; y: top->down rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] for k in range(4): j = (k + 1) % 4 self.draw_line( [rotated_rect[k][0], rotated_rect[j][0]], [rotated_rect[k][1], rotated_rect[j][1]], color=edge_color, linestyle="--" if k == 1 else line_style, linewidth=linewidth, ) if label is not None: text_pos = rotated_rect[1] # topleft corner height_ratio = h / np.sqrt(self.output.height * self.output.width) label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) return self.output def draw_circle(self, circle_coord, color, radius=3): """ Args: circle_coord (list(int) or tuple(int)): contains the x and y coordinates of the center of the circle. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. radius (int): radius of the circle. Returns: output (VisImage): image object with box drawn. """ x, y = circle_coord self.output.ax.add_patch( mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) ) return self.output def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): """ Args: x_data (list[int]): a list containing x values of all the points being drawn. Length of list should match the length of y_data. y_data (list[int]): a list containing y values of all the points being drawn. Length of list should match the length of x_data. color: color of the line. Refer to `matplotlib.colors` for a full list of formats that are accepted. linestyle: style of the line. Refer to `matplotlib.lines.Line2D` for a full list of formats that are accepted. linewidth (float or None): width of the line. When it's None, a default value will be computed and used. Returns: output (VisImage): image object with line drawn. """ if linewidth is None: linewidth = self._default_font_size / 3 linewidth = max(linewidth, 1) self.output.ax.add_line( mpl.lines.Line2D( x_data, y_data, linewidth=linewidth * self.output.scale, color=color, linestyle=linestyle, ) ) return self.output def draw_binary_mask( self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10, is_text=True, ): """ Args: binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and W is the image width. Each value in the array is either a 0 or 1 value of uint8 type. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. area_threshold (float): a connected component smaller than this area will not be shown. Returns: output (VisImage): image object with mask drawn. 
""" if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) has_valid_segment = False binary_mask = binary_mask.astype("uint8") # opencv needs uint8 mask = GenericMask(binary_mask, self.output.height, self.output.width) shape2d = (binary_mask.shape[0], binary_mask.shape[1]) if not mask.has_holes: # draw polygons for regular masks for segment in mask.polygons: # area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) # if area < (area_threshold or 0): # continue has_valid_segment = True segment = segment.reshape(-1, 2) self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) else: # TODO: Use Path/PathPatch to draw vector graphics: # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha has_valid_segment = True self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if is_text: if text is not None and has_valid_segment: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") # self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. 

        Args:
            color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
                picked. The values in the tuple are in the [0.0, 1.0] range.

        Returns:
            jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
                color after being jittered. The values in the tuple are in the [0.0, 1.0] range.
        """
        color = mplc.to_rgb(color)
        vec = np.random.rand(3)
        # better to do it in another color space
        vec = vec / np.linalg.norm(vec) * 0.5
        res = np.clip(vec + color, 0, 1)
        return tuple(res)

    def _create_grayscale_image(self, mask=None):
        """
        Create a grayscale version of the original image.
        The colors in masked area, if given, will be kept.
        """
        img_bw = self.img.astype("f4").mean(axis=2)
        img_bw = np.stack([img_bw] * 3, axis=2)
        if mask is not None:
            img_bw[mask] = self.img[mask]
        return img_bw

    def _change_color_brightness(self, color, brightness_factor):
        """
        Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
        more or less lightness than the original color.

        Args:
            color: color of the polygon. Refer to `matplotlib.colors` for a full list of
                formats that are accepted.
            brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
                0 will correspond to no change, a factor in [-1.0, 0) range will result in
                a darker color and a factor in (0, 1.0] range will result in a lighter color.

        Returns:
            modified_color (tuple[double]): a tuple containing the RGB values of the
                modified color. Each value in the tuple is in the [0.0, 1.0] range.
        """
        assert brightness_factor >= -1.0 and brightness_factor <= 1.0
        color = mplc.to_rgb(color)
        polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
        modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
        modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
        modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
        modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
        return modified_color

    def _convert_boxes(self, boxes):
        """
        Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
        """
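        # Illustrative note (hedged, not from the original source): per the docstring and
        # the isinstance check below, accepted inputs presumably include detectron2 box
        # structures as well as plain Nx4 / Nx5 array-likes, e.g.
        #   self._convert_boxes(Boxes(torch.tensor([[10., 10., 50., 80.]])))  # box structure
        #   self._convert_boxes(np.array([[10, 10, 50, 80]]))                 # Nx4 ndarray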
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
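
The descending-area ordering that overlay_instances uses above can be checked in isolation. A minimal, self-contained sketch with plain NumPy (the box values are made up for illustration, not taken from the source):

import numpy as np

# Three XYXY boxes: a large, a small, and a medium instance.
boxes = np.array([
    [0, 0, 100, 100],   # area 10000
    [10, 10, 20, 20],   # area 100
    [0, 0, 50, 50],     # area 2500
])

# Same formula as overlay_instances: area = prod((x1, y1) - (x0, y0)).
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)

# Descending-area order: large instances are drawn first, small ones land
# on top, which is what reduces occlusion of small objects.
sorted_idxs = np.argsort(-areas).tolist()
print(sorted_idxs)          # [0, 2, 1]
print(boxes[sorted_idxs])   # largest box first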
6
2023-12-05 02:51:53+00:00
24k
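The _change_color_brightness helper above works in HLS space: it scales the lightness channel by (1 + brightness_factor) and clamps it to [0, 1]. A standalone sketch of the same technique, assuming only matplotlib and the standard library (the function name here is illustrative):

import colorsys
import matplotlib.colors as mplc

def change_color_brightness(color, brightness_factor):
    # Same idea as Visualizer._change_color_brightness: scale the HLS
    # lightness channel by (1 + brightness_factor) and clamp to [0, 1].
    assert -1.0 <= brightness_factor <= 1.0
    r, g, b = mplc.to_rgb(color)
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    l = min(max(l + brightness_factor * l, 0.0), 1.0)
    return colorsys.hls_to_rgb(h, l, s)

print(change_color_brightness("g", 0.7))   # lighter green, as used for label text
print(change_color_brightness("g", -0.7))  # darker green, as used for polygon edges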
DiffusionLight/DiffusionLight
relighting/inpainter.py
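The context snippets below repeatedly rebind prepare_latents / prepare_mask_latents under "# OVERWRITE METHODS" via the function descriptor protocol. A minimal standalone sketch of that __get__ rebinding trick (the class and method bodies here are illustrative placeholders, not from the repo):

class Pipeline:
    def prepare_latents(self):
        return "default latents"

def custom_prepare_latents(self):
    # A plain function becomes a bound method via __get__, exactly like the
    # "OVERWRITE METHODS" lines in the snippets below.
    return "noise-moving latents for " + type(self).__name__

pipe = Pipeline()
pipe.prepare_latents = custom_prepare_latents.__get__(pipe, Pipeline)
print(pipe.prepare_latents())  # noise-moving latents for Pipeline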
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n control_image: PipelineImageInput = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 1.0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n controlnet_conditioning_scale: Union[float, List[float]] = 0.5,\n guess_mode: bool = False,\n control_guidance_start: Union[float, List[float]] = 0.0,\n control_guidance_end: Union[float, List[float]] = 1.0,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionControlNetInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionControlNetInpaintPipeline)\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n\n # align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n control_image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n controlnet_conditioning_scale,\n control_guidance_start,\n control_guidance_end,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\n\n global_pool_conditions = (\n controlnet.config.global_pool_conditions\n if isinstance(controlnet, ControlNetModel)\n else controlnet.nets[0].config.global_pool_conditions\n )\n guess_mode = guess_mode or global_pool_conditions\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 4. Prepare image\n if isinstance(controlnet, ControlNetModel):\n control_image = self.prepare_control_image(\n image=control_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n elif isinstance(controlnet, MultiControlNetModel):\n control_images = []\n\n for control_image_ in control_image:\n control_image_ = self.prepare_control_image(\n image=control_image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n control_images.append(control_image_)\n\n control_image = control_images\n else:\n assert False\n\n # 4. Preprocess mask and image - resizes image and mask w.r.t height and width\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n masked_image = init_image * (mask < 0.5)\n _, _, height, width = init_image.shape\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps=num_inference_steps, strength=strength, device=device\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 6. 
Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n # EDITED HERE\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7.1 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)\n\n # 8. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n control_model_input = latents\n control_model_input = self.scheduler.scale_model_input(control_model_input, t)\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n else:\n control_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n controlnet_cond_scale = controlnet_conditioning_scale\n if isinstance(controlnet_cond_scale, list):\n controlnet_cond_scale = controlnet_cond_scale[0]\n cond_scale = controlnet_cond_scale * controlnet_keep[i]\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n control_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=control_image,\n conditioning_scale=cond_scale,\n guess_mode=guess_mode,\n return_dict=False,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n if num_channels_unet == 9:\n latent_model_input = 
torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "CustomStableDiffusionInpaintPipeline", "path": "relighting/pipeline_inpaintonly.py", "snippet": "class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n masked_image_latents: torch.FloatTensor = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 1.0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n 
use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionInpaintPipeline)\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n height,\n width,\n strength,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 4. set timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps=num_inference_steps, strength=strength, device=device\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image\n\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n # 6. 
Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n if masked_image_latents is None:\n masked_image = init_image * (mask_condition < 0.5)\n else:\n masked_image = masked_image_latents\n\n mask, masked_image_latents = self.prepare_mask_latents(\n mask_condition,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 10. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n condition_kwargs = {}\n if isinstance(self.vae, AsymmetricAutoencoderKL):\n init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)\n init_image_condition = init_image.clone()\n init_image = self._encode_vae_image(init_image, generator=generator)\n mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)\n condition_kwargs = {\"image\": init_image_condition, \"mask\": mask_condition}\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "CustomStableDiffusionXLInpaintPipeline", "path": "relighting/pipeline_inpaintonly.py", "snippet": "class CustomStableDiffusionXLInpaintPipeline(StableDiffusionXLInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n masked_image_latents: torch.FloatTensor = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 0.9999,\n 
num_inference_steps: int = 50,\n denoising_start: Optional[float] = None,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n original_size: Tuple[int, int] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Tuple[int, int] = None,\n negative_original_size: Optional[Tuple[int, int]] = None,\n negative_crops_coords_top_left: Tuple[int, int] = (0, 0),\n negative_target_size: Optional[Tuple[int, int]] = None,\n aesthetic_score: float = 6.0,\n negative_aesthetic_score: float = 2.5,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLInpaintPipeline)\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n prompt_2,\n height,\n width,\n strength,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
set timesteps\n def denoising_value_valid(dnv):\n return isinstance(denoising_end, float) and 0 < dnv < 1\n\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n if masked_image_latents is not None:\n masked_image = masked_image_latents\n elif init_image.shape[1] == 4:\n # if images are in latent space, we can't mask it\n masked_image = None\n else:\n masked_image = init_image * (mask < 0.5)\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n # add_noise = True if denoising_start is None else False\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. 
Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n # 8.1 Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 10. Prepare added time ids & embeddings\n if negative_original_size is None:\n negative_original_size = original_size\n if negative_target_size is None:\n negative_target_size = target_size\n\n add_text_embeds = pooled_prompt_embeds\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype=prompt_embeds.dtype,\n )\n add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device)\n\n # 11. Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n if (\n denoising_end is not None\n and denoising_start is not None\n and denoising_value_valid(denoising_end)\n and denoising_value_valid(denoising_start)\n and denoising_start >= denoising_end\n ):\n raise ValueError(\n f\"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: \"\n + f\" {denoising_end} when using type float.\"\n )\n elif denoising_end is not None and denoising_value_valid(denoising_end):\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n 
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n # make sure the VAE is in float32 mode, as it overflows in float16\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n\n if needs_upcasting:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n else:\n return StableDiffusionXLPipelineOutput(images=latents)\n\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" }, { "identifier": "SAMPLERS", "path": "relighting/argument.py", "snippet": "SAMPLERS = {\n \"ddim\": DDIMScheduler,\n \"ddpm\": DDPMScheduler,\n \"unipc\": UniPCMultistepScheduler,\n}" }, { "identifier": "VAE_MODELS", "path": "relighting/argument.py", "snippet": "VAE_MODELS = {\n \"sdxl\": \"madebyollin/sdxl-vae-fp16-fix\",\n \"sdxl_fast\": \"madebyollin/sdxl-vae-fp16-fix\",\n}" }, { "identifier": "DEPTH_ESTIMATOR", "path": "relighting/argument.py", "snippet": "DEPTH_ESTIMATOR = \"Intel/dpt-hybrid-midas\"" }, { "identifier": "get_control_signal_type", "path": "relighting/argument.py", "snippet": "def get_control_signal_type(controlnet):\n if \"normal\" in controlnet:\n return \"normal\"\n elif \"depth\" in controlnet:\n return \"depth\"\n else:\n raise NotImplementedError" }, { "identifier": "estimate_scene_depth", "path": "relighting/image_processor.py", "snippet": "def estimate_scene_depth(image, depth_estimator):\n #image = feature_extractor(images=image, return_tensors=\"pt\").pixel_values.to(\"cuda\")\n #with torch.no_grad(), torch.autocast(\"cuda\"):\n # depth_map = depth_estimator(image).predicted_depth\n\n depth_map = depth_estimator(image)['predicted_depth']\n W, H = image.size\n depth_map = torch.nn.functional.interpolate(\n depth_map.unsqueeze(1),\n size=(H, W),\n mode=\"bicubic\",\n align_corners=False,\n )\n depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_map = (depth_map - depth_min) / (depth_max - 
depth_min)\n image = torch.cat([depth_map] * 3, dim=1)\n\n image = image.permute(0, 2, 3, 1).cpu().numpy()[0]\n image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))\n return image" }, { "identifier": "estimate_scene_normal", "path": "relighting/image_processor.py", "snippet": "def estimate_scene_normal(image, depth_estimator):\n # speed could be improved by not converting back and forth between numpy and torch\n normal_image = depth_estimator(image)['predicted_depth'][0]\n\n normal_image = normal_image.numpy()\n\n # upsample the depth map to match the input size\n hw = np.array(image).shape[:2]\n normal_image = skimage.transform.resize(normal_image, hw, preserve_range=True)\n\n image_depth = normal_image.copy()\n image_depth -= np.min(image_depth)\n image_depth /= np.max(image_depth)\n \n bg_threhold = 0.4\n\n x = cv2.Sobel(normal_image, cv2.CV_32F, 1, 0, ksize=3)\n x[image_depth < bg_threhold] = 0\n\n y = cv2.Sobel(normal_image, cv2.CV_32F, 0, 1, ksize=3)\n y[image_depth < bg_threhold] = 0\n\n z = np.ones_like(x) * np.pi * 2.0\n\n normal_image = np.stack([x, y, z], axis=2)\n normal_image /= np.sum(normal_image ** 2.0, axis=2, keepdims=True) ** 0.5\n\n # rescale back to image size\n return normal_image" }, { "identifier": "merge_normal_map", "path": "relighting/image_processor.py", "snippet": "def merge_normal_map(normal_map, normal_ball, mask_ball, x, y):\n \"\"\"\n Merge a ball into the normal map using a mask\n @params\n normal_map (np.array) - normal map of the scene [height, width, 3]\n normal_ball (np.array) - normal map of the ball [ball_height, ball_width, 3]\n mask_ball (np.array) - mask of the ball [ball_height, ball_width]\n x (int) - x position of the ball (top-left)\n y (int) - y position of the ball (top-left)\n @return\n normal_map (np.array) - the merged normal map [height, width, 3] \n \"\"\"\n result = normal_map.copy()\n\n mask_ball = mask_ball[..., None]\n ball = (normal_ball * mask_ball) # alpha blending the ball\n unball = (normal_map[y:y+normal_ball.shape[0], x:x+normal_ball.shape[1]] * (1 - mask_ball)) # alpha blending the normal map\n result[y:y+normal_ball.shape[0], x:x+normal_ball.shape[1]] = ball+unball # add them together\n return result" }, { "identifier": "fill_depth_circular", "path": "relighting/image_processor.py", "snippet": "def fill_depth_circular(depth_image, x, y, r):\n depth_image = np.array(depth_image)\n\n for i in range(depth_image.shape[0]):\n for j in range(depth_image.shape[1]):\n xy = (i - x - r//2)**2 + (j - y - r//2)**2\n # if xy <= rr**2:\n # depth_image[j, i, :] = 255\n # depth_image[j, i, :] = int(minv + (maxv - minv) * z)\n if xy <= (r // 2)**2:\n depth_image[j, i, :] = 255\n \n depth_image = Image.fromarray(depth_image)\n return depth_image" }, { "identifier": "get_ideal_normal_ball", "path": "relighting/ball_processor.py", "snippet": "def get_ideal_normal_ball(size, flip_x=True):\n \"\"\"\n Generate a normal ball of a specific size \n Normal map is x \"left\", y up, z into the screen \n (we flip X to match sobel operator)\n @params\n - size (int) - single value of height and width\n @return:\n - normal_map (np.array) - normal map [size, size, 3]\n - mask (np.array) - mask that makes a valid normal map [size, size]\n \"\"\"\n # we flip x to match sobel operator\n x = torch.linspace(1, -1, size)\n y = torch.linspace(1, -1, size)\n x = x.flip(dims=(-1,)) if not flip_x else x\n\n y, x = torch.meshgrid(y, x)\n z = (1 - x**2 - y**2)\n mask = z >= 0\n\n # clean up invalid values outside the mask\n x = x * mask\n y = y * mask\n z = z * mask\n \n # get real z value\n z = 
torch.sqrt(z)\n \n # clean up normal map value outside mask \n normal_map = torch.cat([x[..., None], y[..., None], z[..., None]], dim=-1)\n normal_map = normal_map.numpy()\n mask = mask.numpy()\n return normal_map, mask" }, { "identifier": "crop_ball", "path": "relighting/ball_processor.py", "snippet": "def crop_ball(image, mask_ball, x, y, size, apply_mask=True, bg_color = (0, 0, 0)):\n if isinstance(image, Image.Image):\n result = np.array(image)\n else:\n result = image.copy()\n \n result = result[y:y+size, x:x+size]\n if apply_mask:\n result[~mask_ball] = bg_color\n return result" }, { "identifier": "CustomStableDiffusionXLControlNetInpaintPipeline", "path": "relighting/pipeline_xl.py", "snippet": "class CustomStableDiffusionXLControlNetInpaintPipeline(StableDiffusionXLControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n control_image: Union[\n PipelineImageInput,\n List[PipelineImageInput],\n ] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 0.9999,\n num_inference_steps: int = 50,\n denoising_start: Optional[float] = None,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 5.0,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n controlnet_conditioning_scale: Union[float, List[float]] = 1.0,\n guess_mode: bool = False,\n control_guidance_start: Union[float, List[float]] = 0.0,\n control_guidance_end: Union[float, List[float]] = 1.0,\n guidance_rescale: float = 0.0,\n original_size: Tuple[int, int] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Tuple[int, int] = None,\n aesthetic_score: float = 6.0,\n negative_aesthetic_score: float = 2.5,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n\n # align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = 
len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # # 0.0 Default height and width to unet\n # height = height or self.unet.config.sample_size * self.vae_scale_factor\n # width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 0.1 align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n prompt_2,\n control_image,\n strength,\n num_inference_steps,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n controlnet_conditioning_scale,\n control_guidance_start,\n control_guidance_end,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
set timesteps\n def denoising_value_valid(dnv):\n return isinstance(denoising_end, float) and 0 < dnv < 1\n\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image - resizes image and mask w.r.t height and width\n # 5.1 Prepare init image\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n # 5.2 Prepare control images\n if isinstance(controlnet, ControlNetModel):\n control_image = self.prepare_control_image(\n image=control_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n elif isinstance(controlnet, MultiControlNetModel):\n control_images = []\n\n for control_image_ in control_image:\n control_image_ = self.prepare_control_image(\n image=control_image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n control_images.append(control_image_)\n\n control_image = control_images\n else:\n raise ValueError(f\"{controlnet.__class__} is not supported.\")\n\n # 5.3 Prepare mask\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n masked_image = init_image * (mask < 0.5)\n _, _, height, width = init_image.shape\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n add_noise = True if denoising_start is None else False\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. 
Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n # 8.1 Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8.2 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n if isinstance(self.controlnet, MultiControlNetModel):\n controlnet_keep.append(keeps)\n else:\n controlnet_keep.append(keeps[0])\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 10. Prepare added time ids & embeddings\n add_text_embeds = pooled_prompt_embeds\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n dtype=prompt_embeds.dtype,\n )\n add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device)\n\n # 11. 
Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n if (\n denoising_end is not None\n and denoising_start is not None\n and denoising_value_valid(denoising_end)\n and denoising_value_valid(denoising_start)\n and denoising_start >= denoising_end\n ):\n raise ValueError(\n f\"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: \"\n + f\" {denoising_end} when using type float.\"\n )\n elif denoising_end is not None and denoising_value_valid(denoising_end):\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n control_model_input = latents\n control_model_input = self.scheduler.scale_model_input(control_model_input, t)\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n controlnet_added_cond_kwargs = {\n \"text_embeds\": add_text_embeds.chunk(2)[1],\n \"time_ids\": add_time_ids.chunk(2)[1],\n }\n else:\n control_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n controlnet_added_cond_kwargs = added_cond_kwargs\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n controlnet_cond_scale = controlnet_conditioning_scale\n if isinstance(controlnet_cond_scale, list):\n controlnet_cond_scale = controlnet_cond_scale[0]\n cond_scale = controlnet_cond_scale * controlnet_keep[i]\n\n # # Resize control_image to match the size of the input to the controlnet\n # if control_image.shape[-2:] != control_model_input.shape[-2:]:\n # control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode=\"bilinear\", align_corners=False)\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n control_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=control_image,\n conditioning_scale=cond_scale,\n guess_mode=guess_mode,\n added_cond_kwargs=controlnet_added_cond_kwargs,\n return_dict=False,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n 
encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n print(\"rescale: \", guidance_rescale)\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # make sure the VAE is in float32 mode, as it overflows in float16\n if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n else:\n return StableDiffusionXLPipelineOutput(images=latents)\n\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" } ]
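Note: the denoising loop in the pipeline snippet above combines the unconditional and conditional noise predictions with classifier-free guidance and, when guidance_rescale > 0, rescales the combined prediction following Sec. 3.4 of arXiv:2305.08891. A minimal sketch of that step, assuming 4-D latent tensors; the helper name is mine, but the arithmetic mirrors the calls in the snippet:

import torch

def cfg_combine(noise_pred_uncond, noise_pred_text, guidance_scale, guidance_rescale=0.0):
    # Standard classifier-free guidance: push the prediction toward the text-conditioned branch.
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    if guidance_rescale > 0.0:
        # Rescale so the guided prediction keeps the per-sample std of the text branch,
        # which counteracts the overexposure that strong guidance causes.
        dims = list(range(1, noise_pred_text.ndim))
        std_text = noise_pred_text.std(dim=dims, keepdim=True)
        std_cfg = noise_pred.std(dim=dims, keepdim=True)
        rescaled = noise_pred * (std_text / std_cfg)
        noise_pred = guidance_rescale * rescaled + (1.0 - guidance_rescale) * noise_pred
    return noise_pred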
import torch import numpy as np import os import pickle from diffusers import ControlNetModel, AutoencoderKL from PIL import Image from tqdm.auto import tqdm from transformers import pipeline as transformers_pipeline from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type from relighting.image_processor import ( estimate_scene_depth, estimate_scene_normal, merge_normal_map, fill_depth_circular ) from relighting.ball_processor import get_ideal_normal_ball, crop_ball from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
18155
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else:
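Note: process_sd_depth in the cropped_code above builds its ControlNet conditioning from the transformers depth-estimation pipeline and replicates the single-channel depth map to three channels. A standalone sketch of that preprocessing; scene.png is a hypothetical input, and relying on the pipeline's default checkpoint (Intel/dpt-large at the time of writing) is an assumption rather than what the repo pins:

import numpy as np
from PIL import Image
from transformers import pipeline

depth_estimator = pipeline("depth-estimation")     # downloads the default DPT checkpoint
image = Image.open("scene.png").convert("RGB")     # hypothetical input image
depth = depth_estimator(image)["depth"]            # single-channel PIL image
control = np.stack([np.array(depth)] * 3, axis=2)  # H x W x 3, as ControlNet expects
control_image = Image.fromarray(control)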
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else:
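Note: for orientation, a hedged usage sketch of BallInpainter.from_sd as defined above. The checkpoint ids are illustrative assumptions, and the classmethod is assumed to return a BallInpainter wrapping the constructed pipeline (its tail falls outside the crop):

import torch

inpainter = BallInpainter.from_sd(
    model="runwayml/stable-diffusion-inpainting",       # assumed SD inpainting checkpoint
    controlnet="lllyasviel/control_v11f1p_sd15_depth",  # assumed depth ControlNet
    device=0,
    torch_dtype=torch.float16,
)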
pipe = CustomStableDiffusionInpaintPipeline.from_pretrained(
1
2023-12-07 14:03:31+00:00
24k
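Note: taken together, the row fields (import_statement, cropped_code, next_line, gold_snippet_index) describe a next-line completion task with retrieved cross-file context. A minimal consumption sketch, assuming the field semantics follow their names and that generate is any code-completion callable:

def build_prompt(row):
    # Prepend the gold context snippet as commented lines, then the in-file prefix.
    gold = row["context"][row["gold_snippet_index"]]["snippet"]
    commented = "\n".join("# " + line for line in gold.splitlines())
    return commented + "\n" + row["import_statement"] + "\n" + row["cropped_code"]

def next_line_exact_match(row, generate):
    completion = generate(build_prompt(row)).strip()
    predicted = completion.splitlines()[0].strip() if completion else ""
    return predicted == row["next_line"].strip()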
modelscope/normal-depth-diffusion
ldm/models/diffusion/wovae_ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key='image',\n colorize_nlabels=None,\n monitor=None,\n prior_model=None,\n prior_normal=None,\n using_rgb=True):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n self.prior_model = prior_model\n self.using_rgb = using_rgb\n\n assert ddconfig['double_z']\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig['z_channels'],\n 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim,\n ddconfig['z_channels'], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer('colorize',\n torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n if prior_model is not None:\n self.prior_model = instantiate_from_config(prior_model)\n if prior_normal is not None:\n self.prior_normal = instantiate_from_config(prior_normal)\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n try:\n sd = torch.load(path, map_location='cpu')['state_dict']\n except:\n sd = torch.load(path, map_location='cpu')\n\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print('Deleting key {} from state_dict.'.format(k))\n del sd[k]\n m, u = self.load_state_dict(sd, strict=False)\n if len(m) > 0:\n print('missing keys:')\n print(m)\n if len(u) > 0:\n print('unexpected keys:')\n print(u)\n\n print(f'Restored from {path}')\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def prior_to_eval(self):\n\n if self.prior_model is not None:\n self.prior_model.eval()\n\n if self.prior_normal is not None:\n self.prior_normal.eval()\n\n @torch.no_grad()\n def prior_inference(self, inputs, prior_inputs):\n # depth prior model\n # midas or zoe is 384 model\n prior_results = {}\n\n self.prior_to_eval()\n\n model_prior_results = self.prior_model(prior_inputs)\n prior_results.update(model_prior_results)\n\n # using normal map\n if not self.using_rgb:\n normal_prior = self.prior_normal(prior_inputs)\n prior_results.update(normal_prior)\n\n resize_prior_results = {}\n _, __, h, w = inputs.shape\n\n for key in prior_results.keys():\n resize_prior_results[key] = F.interpolate(\n prior_results[key], (w, h), mode='bilinear')\n\n if self.using_rgb:\n return torch.cat([inputs, resize_prior_results['depth']], dim=1)\n else:\n return torch.cat([\n resize_prior_results['normal'], resize_prior_results['depth']\n ],\n dim=1)\n\n def forward(self, input, sample_posterior=True):\n\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1,\n 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n\n inputs = self.get_input(batch, self.image_key)\n if self.prior_model is not None:\n inputs = 
self.prior_inference(inputs, batch['prior'])\n\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='train')\n\n self.log(\n 'rec_loss',\n log_dict_ae['train/rec_loss'],\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log(\n 'aeloss',\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(\n log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='train')\n\n self.log(\n 'discloss',\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(\n log_dict_disc,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='val')\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='val')\n\n self.log('val/rec_loss', log_dict_ae['val/rec_loss'])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n @torch.no_grad()\n def test_step(self, batch, batch_idx):\n pass\n\n @torch.no_grad()\n def sample_imgs(self, batch):\n '''using to test for sampling image\n\n '''\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n return {'samples': reconstructions}\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters()) + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))\n\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n xrec = repeat(xrec[:, 0, ...], 'b h w -> b c h w', c=3)\n\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n samples = self.decode(torch.randn_like(posterior.sample()))\n samples = repeat(samples[:, 0, ...], 'b h w -> b c h w', c=3)\n log['samples'] = samples\n\n log['reconstructions'] = xrec\n log['inputs'] = x\n return log\n\n @torch.no_grad()\n def log_rgbd(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n\n if x.shape[1] == 3:\n if self.prior_model is not None:\n x = self.prior_inference(x, batch['prior'])\n\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n samples = self.decode(torch.randn_like(posterior.sample()))\n log['samples'] = samples\n log['reconstructions'] = xrec\n 
log['inputs'] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == 'segmentation'\n if not hasattr(self, 'colorize'):\n self.register_buffer('colorize',\n torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(\n 0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in 
enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat(\n [unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n elif isinstance(c[k], torch.Tensor):\n c_in[k] = torch.cat(\n [unconditional_conditioning[k], c[k]])\n else:\n assert c[k] == unconditional_conditioning[k]\n c_in[k] = c[k]\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(\n torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n # model_t = self.model.apply_model(x, t, c, **kwargs)\n # model_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n model_output = model_uncond + unconditional_guidance_scale * (\n model_t - model_uncond)\n\n if self.model.parameterization == 'v':\n print('using v!')\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps', 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n 
sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n if self.model.parameterization != 'v':\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)\n * noise)\n\n @torch.no_grad()\n def decode(self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n **kwargs):\n\n timesteps = np.arange(self.ddpm_num_timesteps\n ) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0], ),\n step,\n device=x_latent.device,\n dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return x_dec" }, { "identifier": "DPMSolverSampler", "path": "ldm/models/diffusion/dpm_solver/sampler.py", "snippet": "class DPMSolverSampler(object):\n\n def __init__(self, model, **kwargs):\n super().__init__()\n self.model = model\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.\n device)\n self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')\n\n device = self.model.betas.device\n if x_T is None:\n img = torch.randn(size, device=device)\n else:\n img = x_T\n\n ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)\n\n model_fn = model_wrapper(\n lambda x, t, c: self.model.apply_model(x, t, c),\n ns,\n model_type='noise',\n guidance_type='classifier-free',\n condition=conditioning,\n unconditional_condition=unconditional_conditioning,\n guidance_scale=unconditional_guidance_scale,\n )\n\n dpm_solver = DPM_Solver(\n model_fn, ns, predict_x0=True, thresholding=False)\n x = dpm_solver.sample(\n img,\n steps=S,\n skip_type='time_uniform',\n method='multistep',\n order=2,\n lower_order_final=True)\n\n return x.to(device), None" }, { "identifier": "PLMSSampler", "path": "ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(\n 0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n print(f'Running PLMS Sampling with {total_steps} 
timesteps')\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n ts_next = torch.full((b, ),\n time_range[min(i + 1,\n len(time_range) - 1)],\n device=device,\n dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_plms(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps,\n t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n old_eps=None,\n t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (\n e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1),\n alphas_prev[index],\n device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2]\n - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t" }, { "identifier": "CrossAttention", "path": "ldm/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n\n def __init__(self,\n query_dim,\n context_dim=None,\n heads=8,\n dim_head=64,\n dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h),\n (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... 
-> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1, ) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule,\n n_timestep,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3):\n if schedule == 'linear':\n betas = (\n torch.linspace(\n linear_start**0.5,\n linear_end**0.5,\n n_timestep,\n dtype=torch.float64)**2)\n\n elif schedule == 'cosine':\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep\n + cosine_s)\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == 'sqrt_linear':\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == 'sqrt':\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64)**0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1, ) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(\n self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(\n self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar\n + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: 
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, 'at least one argument must be a Tensor'\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2) +\n ((mean1 - mean2)**2) * torch.exp(-logvar2))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n 'num_updates',\n torch.tensor(0, dtype=torch.int)\n if use_num_upates else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) /\n (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(\n m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay *\n (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(\n shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(\n f'{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.'\n )\n return total_params" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "filter_nan_loss", "path": "ldm/util.py", "snippet": "def filter_nan_loss(loss):\n fake_loss = torch.isnan(loss)\n loss = loss[torch.logical_not(fake_loss)]\n\n if loss.shape[0] == 0:\n return loss.sum()\n else:\n return loss" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not 'target' in config:\n\n print(config)\n if config == '__is_first_stage__':\n return None\n elif config == '__is_unconditional__':\n return None\n raise KeyError('Expected key `target` to instantiate.')\n return get_obj_from_str(config['target'])(**config.get('params', dict()))" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=20):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new('RGB', wh, color='white')\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(10 * (wh[0] / 256))\n lines = '\\n'.join(xc[bi][start:start + nc]\n for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill='black', font=font)\n except UnicodeEncodeError:\n print('Cant encode string for logging. Skipping.')\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
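The `LitEma` snippet above spells out a store/copy_to/restore protocol around evaluation, and the `ema_scope` context manager in the code below follows it exactly. A minimal sketch of the intended workflow, assuming the `ldm` package is importable and using a plain `torch.nn.Linear` as a stand-in for the real diffusion model:

import torch
from ldm.modules.ema import LitEma

model = torch.nn.Linear(4, 4)        # stand-in for the UNet/diffusion model
ema = LitEma(model, decay=0.9999)    # shadow copies registered as buffers

for step in range(100):
    ...                              # forward/backward/optimizer.step() elided
    ema(model)                       # nudge shadow params toward current weights

ema.store(model.parameters())        # stash the raw training weights
ema.copy_to(model)                   # evaluate with the smoothed EMA weights
# ... run validation here ...
ema.restore(model.parameters())      # put the training weights back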
import pdb import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn import torch.nn.functional as F from contextlib import contextmanager from functools import partial from einops import rearrange, repeat from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface) from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.modules.attention import CrossAttention from ldm.modules.diffusionmodules.util import (extract_into_tensor, make_beta_schedule, noise_like) from ldm.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl) from ldm.modules.ema import LitEma from ldm.util import (count_params, default, exists, filter_nan_loss, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat) from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from pytorch_lightning.utilities.distributed import rank_zero_only from pytorch_lightning.utilities.rank_zero import rank_zero_only
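Note that this import list ends with `rank_zero_only` pulled from two different modules. In the file body the pair is wrapped in a try/except so the code runs across PyTorch Lightning versions, the symbol having moved from `utilities.distributed` to `utilities.rank_zero` in newer releases; a sketch of that guard:

try:
    from pytorch_lightning.utilities.distributed import rank_zero_only  # older PL
except ImportError:
    from pytorch_lightning.utilities.rank_zero import rank_zero_only    # newer PL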
16919
'image', 'LR_image', 'segmentation', 'bbox_img', 'ic' ] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1 ) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{ c_key: [c[:, :, :, :, i]] } for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params[ 'original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2**(num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ (rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ (x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor( self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance( cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([ torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd ]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange( adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view( (o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / 
\ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor( [self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
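The cropped code stops just before `_prior_bpd` converts its KL term from nats to bits per dimension. That conversion is a mean over all non-batch dims followed by division by ln 2; a toy check with made-up KL values:

import numpy as np
import torch

kl_nats = torch.full((2, 3, 8, 8), 0.5)          # pretend per-element KL, in nats
bpd = kl_nats.mean(dim=[1, 2, 3]) / np.log(2.0)  # same as mean_flat(kl_nats) / ln 2
print(bpd)                                       # tensor([0.7213, 0.7213])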
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers this is without vae ddpm -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class anneal_warmup(): def __init__(self, anneal_ratio, anneal_global_step, num_steps): self.anneal_ratio = anneal_ratio self.anneal_global_step = anneal_global_step self.steps = num_steps // (len(anneal_global_step) + 1) self.start_steps = self.steps def __call__(self, x, global_step): if (torch.rand(1) > self.anneal_ratio).item(): return x else: return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step)) class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None: self.prior_model = instantiate_from_config(prior_model) else: self.prior_model = None if prior_normal is not None: self.prior_normal = instantiate_from_config(prior_normal) else: self.prior_normal = None def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[ 0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( 'posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer( 'posterior_mean_coef1', to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer( 'posterior_mean_coef2', to_torch((1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == 'eps': lvlb_weights = self.betas**2 / (2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == 'x0': lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / ( 2. * 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError('mu not supported') # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f'{context}: Switched to EMA weights') try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f'{context}: Restored training weights') def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location='cpu') if 'state_dict' in list(sd.keys()): sd = sd['state_dict'] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print('Deleting key {} from state_dict.'.format(k)) del sd[k] missing, unexpected = self.load_state_dict( sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print( f'Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys' ) if len(missing) > 0: print(f'Missing Keys: {missing}') if len(unexpected) > 0: print(f'Unexpected Keys: {unexpected}') if self.use_ema: if len(missing) > 0: model_ema_str = sorted(missing)[-1] # missing model_ema if 'model_ema' in model_ema_str: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) else: if self.ema_copy == True: print(f'Reinitialize model_ema') self.model_ema = LitEma(self.model) print( f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.' ) def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == 'eps': x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == 'x0': x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape( b, *((1, ) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample( img, torch.full((b, ), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss( target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == 'eps': target = noise elif 
self.parameterization == 'x0': target = x_start else: raise NotImplementedError( f'Paramterization {self.parameterization} not yet supported') loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0], ), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict # property of model for (to, cuda, cpu, float, half, ...) def to(self, *args, **kwargs): # type: ignore[valid-type] """See :meth:`torch.nn.Module.to`.""" # this converts `str` device to `torch.device` if self.prior_model is not None: self.prior_model.to(*args, **kwargs) if self.prior_normal is not None: self.prior_normal.to(*args, **kwargs) return super().to(*args, **kwargs) def cuda(self, device=None): # type: ignore[valid-type] """Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. Arguments: device: If specified, all parameters will be copied to that device. If `None`, the current CUDA device index will be used. 
Returns: Module: self """ if device is None: device = torch.device('cuda', torch.cuda.current_device()) elif isinstance(device, int): device = torch.device('cuda', index=device) if self.prior_model is not None: self.prior_model.cuda(device) if self.prior_normal is not None: self.prior_normal.cuda(device) return super().cuda(device=device) def cpu(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.cpu`.""" if self.prior_model is not None: self.prior_model.cpu() if self.prior_normal is not None: self.prior_normal.cpu() return super().cpu() def float(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.float`.""" if self.prior_model is not None: self.prior_model.float() if self.prior_normal is not None: self.prior_normal.float() return super().float() def double(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.double`.""" if self.prior_model is not None: self.prior_model.double() if self.prior_normal is not None: self.prior_normal.double() return super().double() def half(self): # type: ignore[valid-type] """See :meth:`torch.nn.Module.half`.""" if self.prior_model is not None: self.prior_model.half() if self.prior_normal is not None: self.prior_normal.half() return super().half() def prior_to_eval(self): if self.prior_model is not None: self.prior_model.eval() if self.prior_normal is not None: self.prior_normal.eval() @torch.no_grad() def prior_inference(self, inputs, prior_inputs): # depth prior model # midas or zoe is 384 model inputs = inputs.permute(0, 3, 1, 2) prior_results = {} self.prior_to_eval() # using depth prior if self.prior_model is not None: model_prior_results = self.prior_model(prior_inputs) prior_results.update(model_prior_results) # using normal map if self.prior_normal is not None: normal_prior_results = self.prior_normal(prior_inputs) prior_results.update(normal_prior_results) resize_prior_results = {} _, __, h, w = inputs.shape for key in prior_results.keys(): resize_prior_results[key] = F.interpolate( prior_results[key], (w, h), mode='bilinear') # add a rgb input resize_prior_results.update({'rgb': inputs}) input_container = [] for key in self.input_keys: input_container.append(resize_prior_results[key]) return torch.cat(input_container, dim=1).permute(0, 2, 3, 1) @torch.no_grad() def collect_inputs(self, batch): input_container = [] for key in self.input_keys: # [B H W C] input_container.append(batch[key]) return torch.cat(input_container, dim=-1) def training_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log( 'global_step', self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log( 'lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, 
loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) @torch.no_grad() def test_step(self, batch, batch_idx): if self.prior_model is not None: batch['image'] = self.prior_inference(batch['image'], batch['prior']) # image_condition batch['ic'] = batch['image'][..., :3] else: batch['image'] = self.collect_inputs(batch) # image_condition batch['ic'] = batch['image'][..., :3] with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = { key + '_ema': loss_dict_ema[key] for key in loss_dict_ema } self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): # args: outputs, batch, batch_idx if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log['inputs'] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log['diffusion_row'] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope('Plotting'): samples, denoise_row = self.sample( batch_size=N, return_intermediates=True) log['samples'] = samples log['denoise_row'] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key='image', cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, first_stage_ckpts=None, without_crossattn=False, ema_copy=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop('ckpt_path', None) ignore_keys = kwargs.pop('ignore_keys', []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = 
len( first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.first_stage_ckpts = first_stage_ckpts # VAE Load self.instantiate_first_stage(first_stage_config) # CLIP load self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False self.ema_copy = ema_copy if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if self.first_stage_ckpts is not None: first_stage_ckpts = torch.load( self.first_stage_ckpts, map_location='cpu') no_match = self.first_stage_model.load_state_dict( first_stage_ckpts['state_dict'], strict=False) print('encode-decode, no match keys:\n {}'.format(no_match)) for param in self.first_stage_model.parameters(): param.requires_grad = False # lambda-stage-1 without crossattn if without_crossattn: for m in self.modules(): if isinstance(m, CrossAttention): for para in m.parameters(): para.requires_grad = False # RuntimeError: One of the differentiated Tensors does not require grad def make_cond_schedule(self, ): self.cond_ids = torch.full( size=(self.num_timesteps, ), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print('### USING STD-RESCALING ###') x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f'setting self.scale_factor to {self.scale_factor}') print('### USING STD-RESCALING ###') def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == '__is_first_stage__': print('Using first stage also as cond stage.') self.cond_stage_model = self.first_stage_model elif config == '__is_unconditional__': print( f'Training {self.__class__.__name__} as an unconditional model.' 
) self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): ''' # CLIP embedding ''' if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable( self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params['clip_min_weight'], self.split_input_params['clip_max_weight'], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params['tie_braker']: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params['clip_min_tie_weight'], self.split_input_params['clip_max_tie_weight']) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, 
dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out ''' @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.1): ''' we add uncondition prompts to improve classifer-free guidance results ''' x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) ''' 
encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() ''' _, _c, _h, _w = x.shape z = F.interpolate( x, (_w // 8, _h // 8), mode='bilinear', align_corners=False) if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} # To support classifier-free guidance, randomly drop out only text conditioning 10% like sd-v1.5 random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < uncond, 'n -> n 1 1') null_prompts = self.get_learned_conditioning(['']).to(c.device) cc = torch.where(prompt_mask, null_prompts, c) out = [z, cc] if return_first_stage_outputs: xrec = F.interpolate( z, (_w, _h), mode='bilinear', align_corners=False) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack( output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry( z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. (64, 64) uf = self.split_input_params['vqf'] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack( output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, 'split_input_params'): if self.split_input_params['patch_distributed_vq']: ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. 
(64, 64) df = self.split_input_params['vqf'] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print('reducing Kernel') if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print('reducing stride') fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): # obtain encode(x), conditon x, c = self.get_input(batch, self.first_stage_key) # ddpm loss = self(x, c) return loss def guassian_distributed(self, x, sigma=100): y = torch.exp(-(x)**2 / (2 * sigma**2)) return y / y.sum() def forward(self, x, c, *args, **kwargs): # anneal t finetune num_timesteps = self.anneal_func(self.num_timesteps, self.global_step) t = torch.randint( 0, num_timesteps, (x.shape[0], ), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample( x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, 'split_input_params'): assert len( cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params['ks'] # eg. (128, 128) stride = self.split_input_params['stride'] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in [ 'image', 'LR_image', 'segmentation', 'bbox_img', 'ic' ] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1 ) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{ c_key: [c[:, :, :, :, i]] } for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params[ 'original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2**(num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ (rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ (x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor( self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance( cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([ torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd ]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange( adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view( (o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * 
ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor( [self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
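`apply_model`, `encode_first_stage`, and `decode_first_stage` above all share one tiling trick: `nn.Unfold` cuts the input into overlapping crops, the model runs per crop, and `nn.Fold` blends the outputs back, dividing by the folded weights so overlapping pixels are not double-counted. A minimal self-contained sketch, with an identity op in place of the model and uniform weights in place of the `delta_border` weighting used in the real code:

import torch

x = torch.randn(1, 3, 8, 8)                       # toy latent
ks, stride = (4, 4), (2, 2)
unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

z = unfold(x)                                     # (1, 3*16, L) overlapping crops
L = z.shape[-1]
z = z.view(1, 3, ks[0], ks[1], L)                 # (bn, nc, ks0, ks1, L)
o = torch.stack([z[..., i] for i in range(L)], dim=-1)   # identity stands in for self.model

w = torch.ones(1, 1, ks[0], ks[1], L)             # real code: smooth border weights
normalization = fold(torch.ones(1, ks[0] * ks[1], L))    # per-pixel overlap count
stitched = fold((o * w).view(1, -1, L)) / normalization  # stitch crops back together
print(torch.allclose(stitched, x, atol=1e-5))     # True: the round trip recovers x

The `get_input` override earlier in this file also trains for classifier-free guidance: with probability `uncond` (0.1 by default) a sample's text conditioning is swapped for the empty-prompt embedding. A standalone sketch of that masking, with made-up shapes and random tensors in place of real embeddings:

import torch

b, n, d = 4, 77, 768                      # batch, tokens, embed dim (illustrative)
c = torch.randn(b, n, d)                  # per-sample text embeddings
null_prompt = torch.randn(1, n, d)        # stands in for get_learned_conditioning([''])
drop = torch.rand(b) < 0.1                # ~10% of samples become unconditional
cc = torch.where(drop.view(b, 1, 1), null_prompt, c)  # broadcast swap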
return mean_flat(kl_prior) / np.log(2.0)
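This gold line completes the truncated `_prior_bpd` above; assembled (docstring elided), the method reads:

def _prior_bpd(self, x_start):
    batch_size = x_start.shape[0]
    t = torch.tensor(
        [self.num_timesteps - 1] * batch_size, device=x_start.device)
    qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
    kl_prior = normal_kl(
        mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
    return mean_flat(kl_prior) / np.log(2.0)  # nats -> bits per dimension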
21
2023-12-06 07:29:34+00:00
24k
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n torch.nn.init.kaiming_normal_(self.embedding.weight, mode=\"fan_in\", nonlinearity=\"linear\")\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = layers\n self.unique_layers = torch.nn.ModuleList(unique_obejcts(layers))\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n 
if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = -1 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim:\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, p=1).mean())\n\n del outs\n\n\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "RelativeTransformerEncoderLayer", "path": "layers/transformer/relative_transformer.py", "snippet": "class RelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, ln_after_attention: bool = True):\n super().__init__()\n self.ln_after_attention = ln_after_attention\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n if ln_after_attention:\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = 
torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src = self.norm1(src) if self.ln_after_attention else src\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "PrelnRelativeTransformerEncoderLayer", "path": "layers/transformer/relative_preln_transformer.py", "snippet": "class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):\n is_preln = True\n\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,\n drop_expand: bool = True, head_projection_size: Optional[int] = None):\n super().__init__(\n d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,\n activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n drop_expand=drop_expand, head_projection_size=head_projection_size)\n\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "RelativeMoeTransformerEncoderLayer", "path": "layers/transformer/relative_moe_transformer.py", "snippet": "class RelativeMoeTransformerEncoderLayer(LoggingLayer, torch.nn.Module):\n def __init__(self, d_model, nhead, n_experts: int, expert_size: int, n_layers: int,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None,\n dropout_mode: str = \"none\", selection_mode: str = \"add\",\n perplexity_reg: float = 0.0,\n n_heads: int = 1, norm_keys: bool = False, perplexity_reg_mode: str=\"step\",\n n_random: int = 0, reg_type: str = \"normal\",\n topk_mode: str = \"full\", head_projection_size: Optional[int] = None,\n activation_after_topk: bool = False,\n drop_parallel: bool = True,\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False,\n sel_norm: str = \"none\",\n preln: bool = True, ln_affine: bool = True,\n moe_dropout_factor: float = 1.0,\n drop_expert: float = 0.0, sync_distributed: bool = True,\n modulation_amplitude: float = 0.5, moe_init_scale: float = 1.0,\n moe_att_n_experts: int = 4, moe_att_expert_dropout: Optional[float] = None,\n moe_att_selection_mode: str = \"sigmoid\",\n moe_att_k: Optional[int] = None, moe_att_ppl_reg: Optional[float] = None,\n q_expert: 
bool = True, k_expert: bool = True, v_expert: bool = True,\n o_expert: bool = True,\n v_projection_size: Optional[int] = None,\n qside_n_experts: Optional[int] = None,\n moe_attention: bool = False, moe_att_variant: str = \"full\",\n moe_att_shared_experts: bool = False,\n moe_att_kq_n_experts: Optional[int] = None, moe_att_separate_kq_sel: bool = False,\n moe_att_norm_init: bool = False, moe_att_same_sel: bool = False, moe_att_norm_retrieval: bool = False,\n rotate_fraction: float = 0.5, rope_base: float = 10000):\n super().__init__()\n self.preln = preln\n self.i = 0\n\n if moe_attention:\n if moe_att_variant == \"full\":\n self.self_attn = FullMoeRelativeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts,\n perplexity_reg=perplexity_reg if moe_att_ppl_reg is None else moe_att_ppl_reg,\n expert_dropout=drop_expert if moe_att_expert_dropout is None else moe_att_expert_dropout,\n selection_mode=moe_att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n moe_k=n_heads if moe_att_k is None else moe_att_k, o_expert=o_expert, qside_n_experts=qside_n_experts,\n v_projection_size=v_projection_size, shared_experts=moe_att_shared_experts,\n kq_n_experts=moe_att_kq_n_experts, separate_kq_sel=moe_att_separate_kq_sel,\n normalize_init=moe_att_norm_init,\n same_sel=moe_att_same_sel, normalize_retrieval=moe_att_norm_retrieval,\n )\n elif moe_att_variant == \"full_rope\":\n self.self_attn = FullMoeRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts,\n perplexity_reg=perplexity_reg if moe_att_ppl_reg is None else moe_att_ppl_reg,\n expert_dropout=drop_expert if moe_att_expert_dropout is None else moe_att_expert_dropout,\n selection_mode=moe_att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n moe_k=n_heads if moe_att_k is None else moe_att_k, o_expert=o_expert, qside_n_experts=qside_n_experts,\n v_projection_size=v_projection_size, shared_experts=moe_att_shared_experts,\n kq_n_experts=moe_att_kq_n_experts, separate_kq_sel=moe_att_separate_kq_sel,\n normalize_init=moe_att_norm_init, normalize_retrieval=moe_att_norm_retrieval,\n rotate_fraction=rotate_fraction, rope_base=rope_base,\n )\n else:\n raise ValueError(f\"Unknown attention variant {moe_att_variant}\")\n else:\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n std_scale = math.sqrt(2.0 / n_layers) if preln else 1.0\n std_scale *= math.sqrt(moe_init_scale)\n\n self.pkm = MoE(\n d_model, n_experts, expert_size, dropout=dropout * moe_dropout_factor, dropout_mode=dropout_mode,\n weight_scale=std_scale, selection_mode=selection_mode,\n perplexity_reg=perplexity_reg, n_heads=n_heads,\n norm_keys=norm_keys, perplexity_reg_mode=perplexity_reg_mode, n_random=n_random,\n reg_type=reg_type, topk_mode=topk_mode,\n activation_after_topk=activation_after_topk,\n activation=activation,\n normalize_expert_sel_init=normalize_expert_sel_init, norm_key_init=norm_key_init,\n norm_value_init=norm_value_init, identical_init=identical_init,\n sel_norm=sel_norm,\n expert_dropout=drop_expert,\n sync_distributed=sync_distributed,\n modulation_amplitude=modulation_amplitude)\n\n self.norm1 = torch.nn.LayerNorm(d_model, 
elementwise_affine=ln_affine)\n self.norm2 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.drop_parallel = drop_parallel\n\n if preln:\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src = src2 = self.norm1(src)\n\n src3 = self.pkm(src2)\n\n src = src + self.dropout(src3)\n if not self.preln:\n src = self.norm2(src)\n return src" }, { "identifier": "FastRopeTransformerEncoderLayer", "path": "layers/transformer/fast_rope_transformer.py", "snippet": "class FastRopeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,\n rotate_fraction: float = 0.5, rope_base: float = 10000):\n super().__init__()\n self.preln = preln\n self.self_attn = FastRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, rotate_fraction=rotate_fraction,\n rope_base=rope_base)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n\n if preln:\n if n_layers is None:\n raise ValueError(\"n_layers must be specified when using preln\")\n reset_prenorm_params(self, n_layers)\n else:\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src2 = src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n\n if not self.preln:\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "MoeAttentionRelativeTransformerEncoderLayer", "path": "layers/transformer/moe_attention_relative_transformer.py", "snippet": "class MoeAttentionRelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, moe_att_n_experts, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,\n att_perplexity_reg: float = 0.0, expert_dropout: float = 0.0, 
att_selection_mode=\"sigmoid\",\n attention_variant=\"moa\", q_expert: bool = True, k_expert: bool = True, v_expert: bool = True,\n o_expert: bool = True, moe_k: int = 2,\n norm_qk_score: bool = False, v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n cvloss: float = 0.0, switchloss: float = 0.0, zloss: float = 0.0,\n moa_mode: str = \"my\", rotate_fraction: float = 0.5, rope_base: float = 10000,\n moeatt_norm_init: bool = False):\n super().__init__()\n self.is_preln = preln\n if attention_variant not in {\"full\", \"full_rope\"} and (not q_expert):\n raise ValueError(\"q_expert can be disabled only when using qside attention\")\n\n if attention_variant == \"moa\":\n self.self_attn = MoA(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, mode=moa_mode, cvloss=cvloss, switchloss=switchloss, zloss=zloss\n )\n elif attention_variant == \"full\":\n self.self_attn = FullMoeRelativeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n norm_qk_score=norm_qk_score, v_projection_size=v_projection_size, same_sel=same_sel,\n o_expert=o_expert, moe_k=moe_k, qside_n_experts=qside_n_experts,\n shared_experts=shared_experts, kq_n_experts=kq_n_experts, separate_kq_sel=separate_kq_sel,\n normalize_init=moeatt_norm_init\n )\n elif attention_variant == \"full_rope\":\n self.self_attn = FullMoeRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n norm_qk_score=norm_qk_score, v_projection_size=v_projection_size, same_sel=same_sel,\n o_expert=o_expert, moe_k=moe_k, qside_n_experts=qside_n_experts,\n shared_experts=shared_experts, kq_n_experts=kq_n_experts, separate_kq_sel=separate_kq_sel,\n rotate_fraction=rotate_fraction, rope_base=rope_base,\n normalize_init=moeatt_norm_init\n )\n else:\n raise ValueError(f\"Unknown attention variant: {attention_variant}\")\n\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n\n if preln:\n if n_layers is None:\n raise ValueError(\"n_layers must be specified when using preln\")\n reset_prenorm_params(self, n_layers)\n else:\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src) if self.is_preln else src\n 
src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n\n if self.is_preln:\n src2 = self.norm2(src)\n else:\n src2 = src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n\n if not self.is_preln:\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "MoE", "path": "layers/moe_layer.py", "snippet": "class MoE(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, dmodel: int, n_experts: int, expert_size: int, n_heads: int,\n dropout: float = 0, weight_scale: float = 1.0,\n dropout_mode: str = \"none\", selection_mode: str = \"sigmoid\", perplexity_reg: float = 0.0,\n norm_keys: bool = False,\n perplexity_reg_mode: str=\"step\", n_random: int = 0, reg_type: str = \"entropy\",\n topk_mode: str = \"full\", activation_after_topk: bool = False,\n activation = lambda x: F.relu(x, inplace=True),\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False,\n rescale_normed: bool = False, sel_norm: str = \"none\",\n v_dim: Optional[int] = None,\n expert_dropout: float = 0.0,\n sync_distributed: bool = False,\n modulation_amplitude: float = 0.5,\n ppl_past_blocks: int = 0):\n\n super().__init__()\n self.k_dim = dmodel\n self.v_dim = v_dim if v_dim is not None else dmodel\n self.n_experts = n_experts\n self.expert_size = expert_size\n self.size = self.n_experts * self.expert_size\n self.dropout = dropout\n self.dropout_mode = dropout_mode\n self.selection_mode = selection_mode\n self.perplexity_reg = perplexity_reg\n self.k_vec_dim = self.k_dim\n self.n_heads = n_heads\n self.norm_keys = norm_keys\n self.perplexity_reg_mode = perplexity_reg_mode\n self.n_random = n_random\n self.reg_type = reg_type\n self.topk_mode = topk_mode\n self.activation_after_topk = activation_after_topk\n self.activation = activation\n self.weight_scale = weight_scale\n self.normalize_expert_sel_init = normalize_expert_sel_init\n self.norm_key_init = norm_key_init\n self.norm_value_init = norm_value_init\n self.identical_init = identical_init\n self.layer = 0\n self.initalized = False\n self.rescale_normed = rescale_normed\n self.sel_norm = sel_norm\n self.was_training = True\n self.expert_dropout = expert_dropout\n self.reg_counts = 0\n self.sync_distributed = sync_distributed and torch.distributed.is_initialized()\n self.modulation_amplitude = modulation_amplitude\n self.record_all_expert_sel_counts = False\n self.ppl_past_blocks = ppl_past_blocks\n self.blocks_for_ppl = []\n self.recorded_inputs = []\n\n self.coocurence = None\n\n assert self.selection_mode in {\"gate\", \"sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkhorn_local\", \"mul\", \"sinkmoid2\", \"sinkmax2\"}\n assert self.perplexity_reg_mode in {\"step\", \"global\", \"time\", \"global_time\"}\n assert self.dropout_mode in {\"none\", \"score\"}\n assert self.reg_type in {\"perplexity\", \"variance\", \"entropy\", \"l2\", \"switch\"}\n assert self.topk_mode in {\"full\", \"l1_approx\", \"approx\"}\n assert self.sel_norm in {\"none\", \"cos\", \"input\", \"weights\"}\n\n self.register_buffer(\"iter\", torch.tensor(0, dtype=torch.int64), 
persistent=False)\n\n if selection_mode in {\"mul\"} and activation_after_topk:\n raise ValueError(\"Activation after topk is not supported with mul selection\")\n\n self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))\n\n self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))\n\n self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))\n self.sel = lambda x: F.linear(x, self.expert_sel)\n\n torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_scale)\n torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_scale)\n torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_scale)\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.sel_count_log = None\n\n self.all_expert_sel_counts = []\n self.all_expert_sel_soft = []\n\n self.register_buffer(\"kv_sel_counts\", torch.zeros(self.n_experts, self.expert_size), persistent=False)\n self.register_buffer(\"kv_sel_counts_100\", torch.zeros_like(self.kv_sel_counts))\n\n if self.rescale_normed and self.sel_norm != \"none\":\n self.sel_scale = torch.nn.Parameter(torch.ones([1]))\n else:\n self.sel_scale = 1.0\n\n self.register_buffer(\"seq\", torch.arange(max(self.n_heads, self.n_experts, self.k_dim, self.v_dim), dtype=torch.long), persistent=False)\n self.regroup_weights()\n\n if self.ppl_past_blocks > 0 and self.reg_type not in {\"perplexity\", \"entropy\"}:\n print(f\"Warning: ppl_past_blocks>0 (currently {self.ppl_past_blocks}) is only supported with perplexity and entropy regularization\")\n\n def keys_to_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n k = keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n return k.permute(0, 2, 1).contiguous().view(-1, self.k_vec_dim)\n\n def keys_from_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n return keys.view(self.n_experts, self.expert_size, self.k_vec_dim).permute(0, 2, 1).contiguous().view(self.n_experts * self.k_vec_dim, self.expert_size)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def regroup_weights(self) -> Optional[torch.Tensor]:\n with torch.no_grad():\n if self.norm_key_init:\n self.renorm_keep_std(self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size), dim=1)\n\n if self.norm_value_init:\n self.renorm_keep_std(self.values, dim=1)\n\n if self.identical_init:\n k = self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n self.keys.set_(k[:1].expand_as(k).reshape_as(self.keys))\n\n v = self.values.view(self.n_experts, self.expert_size, self.v_dim)\n self.values.set_(v[:1].expand_as(v).reshape_as(self.values))\n\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n def ani(self, x: torch.Tensor) -> torch.Tensor:\n assert x.ndim == 2\n chunk_size = 32\n\n xnorm = F.normalize(x, 2, dim=-1)\n\n accu = 0\n for i in range(0, x.shape[0], chunk_size):\n a = xnorm[i: i + chunk_size]\n sims = xnorm @ a.T\n sims[i : i + chunk_size].fill_diagonal_(0)\n accu += sims.sum()\n\n return accu / (x.shape[0] * (x.shape[0] - 1))\n\n def log_expert_sel_usage(self, prefix: str, channel_sel_counts: torch.Tensor):\n sel_nonzero = (channel_sel_counts != 0).type(torch.float).sum(axis=-1) / self.expert_size\n self.log(f\"{prefix}/mean\", 
sel_nonzero.mean())\n self.log(f\"{prefix}/min\", sel_nonzero.min())\n self.log(f\"{prefix}/max\", sel_nonzero.max())\n\n\n def pre_train_forward(self):\n if self.norm_keys:\n with torch.no_grad():\n self.keys.div_(self.keys.norm(dim=-1, keepdim=True))\n\n if self.training and not self.was_training:\n sorted_counts = self.index_sel_counts.sort(descending=True).values\n self.log(\"test_exert_channel_usage\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.layer = 0\n if self.sel_hist:\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n self.reg_counts = 0\n\n def before_loss(self):\n if self.sel_hist:\n # Concatenate against time dimension. Important for the within-batch regularization\n sel = torch.cat(self.sel_hist, -2)\n self.add_perplexity_reg(sel)\n\n self.sel_hist = []\n\n if self.index_sel_norm > 0:\n if self.training:\n with torch.no_grad():\n self.log(\"usag_rel_perplexity_all_layers\", utils.relative_perplexity(self.index_sel_counts / self.index_sel_norm))\n self.log(\"dead_expert_proportion_all_layers\", (self.index_sel_counts == 0).float().sum() / self.n_experts)\n\n self.log_expert_sel_usage(\"exert_channel_usage\", self.kv_sel_counts)\n\n self.kv_sel_counts_100.add_(self.kv_sel_counts)\n self.kv_sel_counts.zero_()\n\n self.index_sel_counts_100 = self.index_sel_counts_100 + self.index_sel_counts\n self.index_sel_norm_100 = self.index_sel_norm_100 + self.index_sel_norm\n\n if self.training and self.iter % 100 == 0:\n norm_cnt = self.index_sel_counts_100 / self.index_sel_norm_100\n self.log(\"usag_rel_perplexity_100\", utils.relative_perplexity(norm_cnt))\n self.log(\"dead_expert_proportion_100\", (self.index_sel_counts_100 == 0).float().sum() / self.n_experts)\n\n sorted_counts = self.index_sel_counts_100.sort(descending=True).values\n self.log(\"usage_counts_100\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n\n self.log_expert_sel_usage(\"exert_channel_usage_100\", self.kv_sel_counts_100)\n self.kv_sel_counts_100.zero_()\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.log(\"ani/keys\", self.ani(self.keys_to_logical_order(self.keys)))\n self.log(\"ani/values\", self.ani(self.values.flatten(0, -2)))\n self.log(\"ani/expert_sel\", self.ani(self.expert_sel.T))\n\n if self.training:\n self.iter += 1\n\n def topk(self, x: torch.Tensor, k: int, approx: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n if approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return scores, self.seq[:k] * x.shape[-1] + ind\n else:\n return x.topk(k, dim=-1, sorted=False)\n\n def rolling_logsumexp(self, x: torch.Tensor) -> torch.Tensor:\n # Simulate calculating logsumexp over a bigger batch than the current one. 
Will have stale values, but that\n # should not matter much later in training.\n if self.ppl_past_blocks == 0 or not self.training:\n return F.log_softmax(x, dim=-1)\n else:\n if len(self.blocks_for_ppl) == self.ppl_past_blocks:\n self.blocks_for_ppl.pop(0)\n\n self.blocks_for_ppl.append(x)\n res = F.log_softmax(torch.cat(self.blocks_for_ppl, dim=0), dim=-1)\n self.blocks_for_ppl[-1] = self.blocks_for_ppl[-1].detach()\n return res\n\n def add_perplexity_reg(self, sel: torch.Tensor):\n sync_distributed = self.sync_distributed and (self.perplexity_reg_mode not in {\"time\", \"global_time\"})\n\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n sel = sel.flatten(0, -3)\n else:\n sel = sel.flatten(0, -2)\n\n # Note: sel are raw logits, no matter what activation is used\n if self.perplexity_reg > 0:\n if self.reg_type == \"perplexity\":\n sel_d = self.rolling_logsumexp(sel)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, self.sync_distributed)\n loss = lambda: self.perplexity_reg * ( - utils.relative_perplexity_l(sel_d).mean())\n elif self.reg_type == \"entropy\":\n sel_d = self.rolling_logsumexp(sel)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, self.sync_distributed)\n loss = lambda: self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n elif self.reg_type == \"variance\":\n if sync_distributed:\n raise NotImplementedError(\"Variance regularization is not supported in distributed mode\")\n avg_sel = sel.mean(-2)\n loss = lambda: self.perplexity_reg * avg_sel.var(-1).mean()\n elif self.reg_type == \"l2\":\n loss = lambda: self.perplexity_reg * sel.pow(2).mean()\n elif self.reg_type == \"switch\":\n if sync_distributed:\n torch.distributed.all_reduce(self.reg_counts, op=torch.distributed.ReduceOp.SUM)\n\n p_sel_real = self.reg_counts / self.reg_counts.sum(-1, keepdims=True)\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n p_sel_real = p_sel_real.unsqueeze(-2)\n\n loss = lambda: self.perplexity_reg * (F.softmax(sel, dim=-1) * p_sel_real).mean()\n self.reg_counts = 0\n else:\n assert False\n\n self.add_reg(loss, \"moe\")\n\n def compute_scores(self, input: torch.Tensor, index: CVMMSel, expert_scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n scores = cvmm(input, index, self.keys)\n\n if self.selection_mode in {\"mul\"}:\n scores = scores * expert_scores[..., None]\n elif self.selection_mode in {\"gate\", \"sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkmoid2\"}:\n # Handle it later\n pass\n\n scores = self.activation(scores)\n\n plot_training = self.train and self.iter % 10 == 0\n if plot_training:\n with torch.no_grad():\n gt0 = (scores > 0).float()\n gt0_s = gt0.sum()\n\n if plot_training:\n self.log(\"relu_pass_rate\", gt0_s / scores.numel())\n\n self.kv_sel_counts.index_add_(0, index.raw_sel.flatten(), gt0.flatten(end_dim=-2))\n\n if self.dropout > 0 and self.dropout_mode != \"none\":\n scores = F.dropout(scores, self.dropout, training=self.training)\n\n return scores\n\n def sel_activation(self, sel: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n reg_sel = sel\n if self.selection_mode in {\"sigmoid\"}:\n sel = torch.sigmoid(sel)\n elif self.selection_mode in {\"mul\"}:\n sel = sel.abs()\n reg_sel = sel\n elif self.selection_mode in {\"gate\"}:\n sel = F.softmax(sel, dim=-1)\n with torch.no_grad():\n self.log(\"expert_rel_perplexity_per_selection\", utils.relative_perplexity(sel).mean())\n else:\n assert False\n\n return sel, reg_sel\n\n def forward(self, input: 
torch.Tensor) -> torch.Tensor:\n out = 0\n\n in1 = in2 = input\n\n sel = self.sel(in1)\n # sel=sel.float()\n\n if self.sel_norm == \"cos\":\n sel = sel / (in1.norm(dim=-1, keepdim=True) * self.expert_sel.norm(dim=-1)[None]) * self.sel_scale\n elif self.sel_norm == \"weights\":\n sel = sel * (self.sel_scale / self.expert_sel.norm(dim=-1)[None])\n elif self.sel_norm == \"input\":\n sel = sel * (self.sel_scale / in1.norm(dim=-1, keepdim=True))\n\n sel_raw = reg_sel = sel\n\n inv_val = float(\"-inf\")\n\n if not self.activation_after_topk:\n # Sinkhorn should be always applied before top-k\n sel, reg_sel = self.sel_activation(sel, input.shape[-2])\n inv_val = 0\n\n if self.training and self.expert_dropout > 0:\n if self.selection_mode not in {\"sigmoid\", \"gate\"}:\n raise ValueError(\"Expert dropout not supported in this mode\")\n\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, inv_val)\n else:\n sel2 = sel\n\n sel_val, sel_index = self.topk(sel2, self.n_heads, self.topk_mode in {\"l1_approx\", \"approx\"})\n\n if self.activation_after_topk or (self.selection_mode in {\"mul\"}):\n sel_val = torch.gather(sel_raw, -1, sel_index)\n sel_val, reg_sel = self.sel_activation(sel_val, input.shape[-2])\n\n\n record_counts_now = (self.training and self.iter % 10 == 0) or (not self.training) or (self.record_all_expert_sel_counts)\n\n if not self.training:\n sel_index_flat = sel_index.flatten(end_dim=-2)\n if self.coocurence is None:\n self.coocurence = torch.zeros([self.n_experts, self.n_experts], device=sel_index_flat.device, dtype=torch.long)\n\n for h1 in range(self.n_heads):\n for h2 in range(self.n_heads):\n ind_flat = sel_index_flat[..., h1] * self.n_experts + sel_index_flat[..., h2]\n values = torch.tensor([1], device=self.coocurence.device, dtype=self.coocurence.dtype).expand_as(ind_flat)\n # values = sel_val[..., h2].flatten()\n self.coocurence.flatten().put_(ind_flat, values, accumulate=True)\n # self.coocurence[sel_index_flat[..., h1], sel_index_flat[..., h2]] += 1\n\n if record_counts_now or self.reg_type == \"switch\":\n reg_counts = F.one_hot(sel_index, self.n_experts).type_as(input)\n\n if self.reg_type == \"switch\":\n reg_counts2 = reg_counts.view(*input.shape[:-2], input.shape[-2] * self.n_heads, self.n_experts)\n if self.perplexity_reg_mode == \"time\":\n reg_counts2 = reg_counts2.sum(-2)\n else:\n reg_counts2 = reg_counts2.flatten(end_dim=-2).sum(0)\n\n self.reg_counts = self.reg_counts + reg_counts2\n\n if record_counts_now:\n with torch.no_grad():\n sel_counts = reg_counts.flatten(end_dim=-2).sum(0)\n cnt = sel_index.nelement()\n\n p_expert_sel = sel_counts / cnt\n\n self.index_sel_counts = self.index_sel_counts + sel_counts\n self.index_sel_norm = self.index_sel_norm + cnt\n\n if self.record_all_expert_sel_counts:\n softcnt = torch.zeros_like(sel_counts, dtype=sel_val.dtype)\n softcnt.index_add_(0, sel_index.flatten(), sel_val.flatten())\n\n self.all_expert_sel_soft.append(softcnt)\n self.all_expert_sel_counts.append(sel_counts)\n\n if self.training:\n self.log(\"min_sel_score\", sel_val.min(dim=-1).values.mean())\n self.log(\"max_sel_score\", sel_val.max(dim=-1).values.mean())\n\n sel_oh = F.one_hot(sel_index, self.n_experts).sum(-2).bool()\n if self.layer >= 1 and self.training:\n self.log(f\"layer_sel_overlap_{self.layer}\", ((self.prev_sel_oh & sel_oh).sum(-1).float() / self.n_heads).mean())\n\n self.prev_sel_oh = sel_oh\n\n ppl = utils.relative_perplexity(p_expert_sel)\n self.log(\"usage_rel_perplexity\", ppl)\n 
self.log(\"dead_expert_proportion\", (p_expert_sel == 0).float().sum() / self.n_experts)\n\n if self.perplexity_reg_mode in {\"step\", \"time\"}:\n self.add_perplexity_reg(reg_sel)\n elif self.perplexity_reg > 0 and self.training:\n self.sel_hist.append(reg_sel)\n\n sel_indices = cvmm_prepare_sel2(sel_index.int())\n\n scores = self.compute_scores(in2, sel_indices, sel_val)\n\n sel_indices = sel_indices.clone()\n sel_indices.reduction_weight = sel_val\n sel_indices.sel_index = sel_indices.out_index\n sel_indices.out_index = None\n\n if self.selection_mode not in {\"gate\", \"sigmoid\"}:\n sel_indices.reduction_weight = torch.ones_like(sel_indices.reduction_weight)\n\n out = cvmm(scores, sel_indices, self.values)\n\n self.layer += 1\n\n self.was_training = self.training\n res = out.view(*input.shape[:-1], self.v_dim)\n return res\n\n def dump_logs(self, save_dir: str):\n if self.coocurence is not None:\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.coocurence, os.path.join(save_dir, \"coocurence.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if self.coocurence is not None:\n coo = self.coocurence / self.coocurence.diagonal().clamp(min=1)[:, None]\n res[\"expert_coocurence\"] = framework.visualize.plot.Heatmap(coo, xlabel=\"expert\", ylabel=\"expert\", textval=False)\n self.coocurence = None\n return res" }, { "identifier": "Result", "path": "interfaces/result.py", "snippet": "class Result:\n outputs: torch.Tensor\n loss: torch.Tensor\n\n batch_dim = 0\n\n def plot(self) -> Dict[str, Any]:\n return {}\n\n @property\n def batch_size(self) -> int:\n return self.outputs.shape[self.batch_dim]\n\n @staticmethod\n def merge(l: List, batch_weights: Optional[List[float]] = None):\n if len(l) == 1:\n return l[0]\n batch_weights = batch_weights if batch_weights is not None else [1] * len(l)\n loss = sum([r.loss * w for r, w in zip(l, batch_weights)]) / sum(batch_weights)\n out = torch.cat([r.outputs for r in l], l[0].batch_dim)\n return l[0].__class__(out, loss)" }, { "identifier": "LayerVisualizer", "path": "layers/layer_with_visualization.py", "snippet": "class LayerVisualizer:\n def __init__(self, module: torch.nn.Module, options: Dict[str, Any] = {}):\n self.modules = []\n self.options = options\n self.curr_options = None\n for n, m in module.named_modules():\n if isinstance(m, LayerWithVisualization):\n self.modules.append((n, m))\n\n def plot(self) -> Dict[str, Any]:\n res = {}\n for n, m in self.modules:\n res.update({f\"{n}/{k}\": v for k, v in m.plot(self.curr_options).items()})\n m.visualization_enabled = False\n\n self.curr_options = None\n return res\n\n def prepare(self, options: Dict[str, Any] = {}):\n self.curr_options = self.options.copy()\n self.curr_options.update(options)\n\n for _, m in self.modules:\n m.prepare()\n m.visualization_enabled = True" }, { "identifier": "FullMoeRelativeAttentionCore", "path": "layers/transformer/full_moe_relative_attention.py", "snippet": "class FullMoeRelativeAttentionCore(LayerWithVisualization, LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, state_size: int, n_heads: int, n_experts: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, init_std_scale: float = 1.0,\n perplexity_reg: float = 0, share_pk: bool = True, expert_dropout: float = 0.0,\n selection_mode: str = \"sigmoid\", moe_k: int = 2, q_expert: bool = True,\n k_expert: bool = True, v_expert: bool = True, o_expert: bool = 
True, norm_qk_score: bool = False,\n v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n normalize_init: bool = False, normalize_retrieval: bool = False):\n\n super().__init__()\n\n self.input_size = input_size or state_size\n self.output_size = output_size or state_size\n self.pe_size = self.input_size\n self.perplexity_reg = perplexity_reg\n self.share_pk = share_pk\n self.expert_dropout = expert_dropout\n self.selection_mode = selection_mode\n self.iter = 0\n self.moe_k = moe_k\n self.norm_qk_score = norm_qk_score\n self.same_sel = same_sel\n self.shared_experts = shared_experts\n self.init_std_scale = init_std_scale\n self.normalize_init = normalize_init\n self.attention_to_visualize = []\n self.selections_to_visualize = {}\n\n self.is_expert = {\n \"k\": k_expert,\n \"q\": q_expert,\n \"v\": v_expert,\n \"o\": o_expert\n }\n self.n_experts = {\n \"k\": kq_n_experts or n_experts,\n \"q\": kq_n_experts or qside_n_experts or n_experts,\n \"v\": n_experts,\n \"o\": qside_n_experts or n_experts\n }\n\n self.separate_k_sel = separate_kq_sel or (self.n_experts[\"k\"] != self.n_experts[\"v\"])\n self.separate_q_sel = separate_kq_sel or (self.n_experts[\"q\"] != self.n_experts[\"o\"])\n\n self.sel_hist = {}\n self.sel_counts_100 = {}\n\n self.n_heads = n_heads\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0 else lambda x: x\n self.projection_size = projection_size or (state_size // n_heads)\n self.v_projection_size = v_projection_size or self.projection_size\n\n self.std_in = init_std_scale * math.sqrt(1 / self.input_size)\n std_out = init_std_scale * math.sqrt(1 / (n_heads * self.v_projection_size))\n\n self.create_selection_logic()\n\n self.src_side_maps = {\"k\", \"v\"}\n\n self.projections = torch.nn.ParameterDict({\n \"q\": self.create_param_block(\"q\", self.input_size, self.projection_size, self.std_in),\n \"k\": self.create_param_block(\"k\", self.input_size, self.projection_size, self.std_in),\n \"v\": self.create_param_block(\"v\", self.input_size, self.v_projection_size, self.std_in),\n \"o\": self.create_param_block(\"o\", self.v_projection_size, self.output_size, std_out),\n })\n\n if normalize_retrieval:\n self.norm_ret = torch.nn.LayerNorm(self.projection_size)\n else:\n self.norm_ret = lambda x: x\n\n self.sel_correlation = 0\n\n self.register_buffer(\"scale\", torch.full([1], 1.0 / math.sqrt(self.projection_size)), persistent=False)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def get_n_copies(self, name: str):\n return self.n_heads\n\n def create_param_block(self, name: str, in_size: int, out_size: int, std: float):\n n_copies = self.get_n_copies(name)\n\n if self.is_expert[name]:\n exp_mul = 1 if self.shared_experts else n_copies\n p = torch.nn.Parameter(torch.randn(exp_mul * self.n_experts[name], in_size, out_size) * std)\n if self.normalize_init:\n self.renorm_keep_std(p, dim=0)\n return p\n else:\n if name == \"o\":\n in_size = n_copies * in_size\n else:\n out_size = n_copies * out_size\n return torch.nn.Parameter(torch.randn(out_size, in_size) * std)\n\n def create_selection_logic(self):\n sels_params = {}\n self.sel_map = {}\n\n def register_remap(dest: str, src: str) -> bool:\n if not (src in sels_params or src in self.sel_map):\n # src is not defined\n 
return False\n\n assert self.n_experts[src] == self.n_experts[dest]\n self.sel_map[dest] = self.sel_map.get(src, src)\n return True\n\n if self.is_expert[\"o\"]:\n sels_params[\"o\"] = self.init_sel(\"o\", self.std_in)\n\n if self.is_expert[\"q\"] and (self.separate_q_sel or not register_remap(\"q\", \"o\")):\n sels_params[\"q\"] = self.init_sel(\"q\", self.std_in)\n\n if self.is_expert[\"v\"] and ((not self.same_sel) or not register_remap(\"v\", \"o\")):\n sels_params[\"v\"] = self.init_sel(\"v\", self.std_in)\n\n if self.is_expert[\"k\"]:\n if (not (self.same_sel and self.separate_k_sel and register_remap(\"k\", \"q\"))) and (self.separate_k_sel or not register_remap(\"k\", \"v\")):\n sels_params[\"k\"] = self.init_sel(\"k\", self.std_in)\n\n self.selections = torch.nn.ParameterDict(sels_params)\n\n def init_sel(self, name: str, std: float) -> torch.nn.Parameter:\n n_copies = self.get_n_copies(name)\n n_experts = self.n_experts[name]\n sel = torch.nn.Parameter(torch.randn(n_experts*n_copies, self.input_size) * std)\n self.renorm_rows(sel)\n return sel\n\n def renorm_rows(self, x: torch.Tensor):\n with torch.no_grad():\n std_t = x.std(dim=-1, keepdim=True)\n x.div_(x.norm(dim=-1, keepdim=True))\n x.mul_(std_t / x.std())\n\n\n def project_to_torch_order(self, x: torch.Tensor):\n return x.view(*x.shape[:-1], self.get_n_copies(\"k\"), -1).transpose(-2, -3)\n\n def get_mask_tensor(self, src_len: int, mask: Optional[AttentionMask]) -> Optional[torch.Tensor]:\n if mask is None or (mask.position_mask is None and mask.src_length_mask is None):\n return None\n\n # mask.position_mask: [..., N_out, N_in]\n # mask.src_length_mask: [B, ...., N_in]\n # True where it has to be masked\n\n if mask.position_mask is not None:\n n_pad = src_len - mask.position_mask.shape[-1]\n if n_pad > 0:\n pm = F.pad(mask.position_mask, (n_pad, 0), 'constant', value=False)\n else:\n pm = mask.position_mask\n\n if mask.position_mask is None:\n m = mask.src_length_mask.unsqueeze(-2).unsqueeze(-2)\n elif mask.src_length_mask is None:\n m = pm\n else:\n m = mask.src_length_mask.unsqueeze(-2).unsqueeze(-2) | pm\n\n return m\n\n def train(self, mode: bool = True):\n self.sel_hist = {}\n return super().train(mode)\n\n def get_lost_on_hist(self, l: List[torch.Tensor]) -> torch.Tensor:\n assert l[0].ndim == 4\n l = [t.flatten(1,2) for t in l]\n sel = torch.cat(l, -2)\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, sync_distributed=False)\n return self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n\n def get_reg_loss(self) -> Dict[str, torch.Tensor]:\n l = super().get_reg_loss()\n for k, v in self.sel_hist.items():\n l[f\"moe_att_entropy/{k}\"] = self.get_lost_on_hist(v)\n\n self.sel_hist = {}\n return l\n\n def get_sel(self, t: torch.Tensor, w: torch.Tensor, name: str) -> Selection:\n n_experts = self.n_experts[name]\n n_copies = self.get_n_copies(name)\n\n sel = F.linear(t, w).float()\n sel = sel.view(*sel.shape[:-1], n_copies, -1)\n with torch.no_grad():\n if self.expert_dropout > 0 and self.training:\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, float('-inf'))\n else:\n sel2 = sel\n _, sel_index = sel2.topk(self.moe_k, dim=-1, sorted=False)\n sel_val = torch.gather(sel, -1, sel_index)\n\n if self.selection_mode == \"softmax\":\n sel_val = sel_val.softmax(-1)\n elif self.selection_mode == \"sigmoid\":\n sel_val = sel_val.sigmoid()\n else:\n raise ValueError(\"Unknown selection mode: \" + self.selection_mode)\n\n exp_shift = 0 if 
self.shared_experts else n_experts\n\n sel_index_shifted = (torch.arange(n_copies, device=sel_index.device, dtype=sel_index.dtype) * exp_shift).unsqueeze(-1) + sel_index\n sel_index_pp = cvmm_prepare_sel2(sel_index_shifted.flatten(-2,-1), sel_val)\n\n return Selection(sel, sel_val, sel_index, sel_index_pp)\n\n def before_loss(self):\n self.iter += 1\n if self.iter % 100 == 0:\n for k, v in self.sel_counts_100.items():\n sorted_counts = v.sort(descending=True).values\n self.log(f\"sel_counts/{k}\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.sel_counts_100 = {}\n\n def exp_proj(self, x: torch.Tensor, w: torch.Tensor, sel: Selection) -> torch.Tensor:\n return cvmm(x, sel.sel_index, w)\n\n def compute_sel(self, curr_state: torch.Tensor, attend_to: torch.Tensor) -> Dict[str, Selection]:\n self.selection_mode\n outs = {}\n done = {}\n cross_atten = curr_state is not attend_to\n\n for name in (set(self.selections.keys()) | set(self.sel_map.keys())):\n name_actual = self.sel_map.get(name, name)\n\n # There coukd be 2 versions of everything: source side and destination side. Check if they are different,\n # and if not, use the cached version, my_id is the unique identifier for this transformation\n is_src_side = (name in self.src_side_maps) or not cross_atten\n my_id = (name_actual, is_src_side)\n\n cached = done.get(my_id)\n if cached is not None:\n outs[name] = cached\n continue\n\n # No cache, actually compute\n inp = attend_to if is_src_side else curr_state\n v = self.selections[name_actual]\n outs[name] = self.get_sel(inp, v, name)\n\n # Save history for regularization\n if self.perplexity_reg > 0 and self.training:\n if name not in self.sel_hist:\n self.sel_hist[name] = []\n self.sel_hist[name].append(outs[name].raw_sel)\n\n # Visualize statistics\n if self.training and self.iter % 10 == 0:\n self.sel_counts_100[name] = self.sel_counts_100.get(name, 0) + \\\n F.one_hot(outs[name].raw_sel_index.flatten(), self.n_experts[name]).sum(0)\n\n done[my_id] = outs[name]\n\n return outs\n\n def project(self, name: str, src: torch.Tensor, sel: Dict[str, Selection]) -> torch.Tensor:\n if name in sel:\n sv = sel[name]\n if self.norm_qk_score and name in {\"q\", \"k\"}:\n sv.sel_index.reduction_weight = F.normalize(sv.sel_index.reduction_weight, p=1, dim=-1)\n return self.exp_proj(src, self.projections[name], sv)\n else:\n return F.linear(src, self.projections[name])\n\n def attend(self, curr_state: torch.Tensor, attend_to: torch.Tensor, pos_offset: int, v: torch.Tensor,\n k: torch.Tensor, q: torch.Tensor, mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n raise NotImplementedError()\n\n def attention_proj(self, att: torch.Tensor, v: torch.Tensor,\n mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n if mask is not None:\n att.masked_fill_(mask, float('-inf'))\n\n att = F.softmax(att, dim=-1)\n\n res = att @ v\n return res, att\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n # curr_state: [batch_size, out_len, c]\n # attend_to: [batch_size, in_len, c]\n\n if pos_offset is None:\n assert curr_state.shape[1] == attend_to.shape[1], \"If attend_to has different shape than curr_state, pos_offset should be provided\"\n pos_offset = 0\n\n sel = self.compute_sel(curr_state, attend_to)\n\n # scale q and k with sqrt(scale) before the attention. 
This should save memory, be faster, and\n # keep the range of k and v better. It should make attention NaNs better with float16.\n scale = self.scale.sqrt()\n\n q = self.project(\"q\", curr_state, sel)\n q = q * scale.type_as(q)\n k = self.project(\"k\", attend_to, sel)\n k = k * scale.type_as(k)\n v = self.project(\"v\", attend_to, sel)\n\n q = self.project_to_torch_order(q) if \"q\" not in sel else q.transpose(-2,-3)\n k = self.project_to_torch_order(k) if \"k\" not in sel else k.transpose(-2,-3)\n v = self.project_to_torch_order(v) if \"v\" not in sel else v.transpose(-2,-3)\n\n k = self.dropout(k)\n\n res, att = self.attend(curr_state, attend_to, pos_offset, v, k, q, self.get_mask_tensor(attend_to.shape[-2], mask))\n res = self.norm_ret(res)\n\n if self.visualization_enabled:\n self.attention_to_visualize.append(att[0].detach())\n for k, s in sel.items():\n if k not in self.selections_to_visualize:\n self.selections_to_visualize[k] = []\n\n with torch.no_grad():\n m = torch.zeros([*s.sel_val[0].shape[:-1], self.n_experts[k]], device=s.sel_val.device, dtype=s.sel_val.dtype)\n m.scatter_(-1, s.raw_sel_index[0], s.sel_val[0])\n\n self.selections_to_visualize[k].append(m)\n\n if self.get_n_copies(\"k\") != self.get_n_copies(\"v\"):\n res = res.view(\n *res.shape[:-1], self.get_n_copies(\"v\") // self.get_n_copies(\"k\"), -1).transpose(2,3).flatten(1,2).contiguous()\n\n if self.is_expert[\"o\"]:\n res = res.transpose(-2, -3)\n # The output selection indices are calculated from the current state and are also used for projecting \"q\".\n # But that projection needs to create multiple copies for the different heads. Here we already have the\n # heads, but we have to create copies for the top-k elements. We can calculate that from the reduction\n # weight. We also want to compute not only the weighted average between the top-k elements, but also\n # of the different heads. 
So reshape the reduction weight accordingly.\n o_sel = sel[\"o\"].sel_index.clone()\n o_sel.sel_index = o_sel.out_index // o_sel.reduction_weight.shape[-1]\n o_sel.reduction_weight = o_sel.reduction_weight.flatten(-2)\n out = cvmm(res, o_sel, self.projections[\"o\"])\n else:\n res = res.transpose(-2, -3)\n out = F.linear(res.contiguous().view(*curr_state.shape[:-1], -1), self.projections[\"o\"])\n\n return out\n\n def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:\n r = {}\n marks = options.get(\"steplabel\")\n n_steps = options.get(\"n_steps\") or 9999999\n y_marks = options.get(\"target_labels\", marks)\n\n ns1 = (self.attention_to_visualize[0].shape[-2] + n_steps) if n_steps < 0 else 0\n ns1_e = self.attention_to_visualize[0].shape[-2] if n_steps < 0 else n_steps\n ns2 = (self.attention_to_visualize[0].shape[-1] + n_steps) if n_steps < 0 else 0\n ns2_e = self.attention_to_visualize[0].shape[-1] if n_steps < 0 else n_steps\n\n if marks is not None:\n assert len(marks) == self.attention_to_visualize[0].shape[-1]\n marks = marks[ns2:ns2_e]\n\n if y_marks is not None:\n assert len(y_marks) == self.attention_to_visualize[0].shape[-2]\n y_marks = y_marks[ns1:ns1_e]\n\n if options.get(\"mha.plot_head_details\") and self.attention_to_visualize[0].shape[0] > 1:\n for head in range(self.attention_to_visualize[0].shape[0]):\n sel_map = {k: [e[:, head][ns1:ns1_e] if k in {'q', 'o'} else e[:, head][ns2:ns2_e] for e in v] for k, v in self.selections_to_visualize.items()}\n selections = {k: torch.stack(v, 0).cpu() for k, v in sel_map.items()}\n\n x_selections = {k: v for k, v in selections.items() if k in {'k', 'v'}}\n y_selections = {k: v for k, v in selections.items() if k in {'q', 'o'}}\n\n r[f\"head_{head}\"] = MoEAttentionPlot(\n torch.stack([layer[head][ns1:ns1_e, ns2:ns2_e] for _, layer in enumerate(self.attention_to_visualize)], 0),\n x_selections, y_selections,\n ylabel=\"dest\", xlabel=\"src\", x_marks=marks, y_marks=y_marks)\n\n r[\"attention_max\"] = framework.visualize.plot.AnimatedHeatmap(\n torch.stack([layer.max(0)[0][ns1:ns1_e, ns2:ns2_e] for _, layer in enumerate(self.attention_to_visualize)], 0),\n ylabel=\"dest\", xlabel=\"src\", textval=False, x_marks=marks, y_marks=y_marks, ignore_wrong_marks=True)\n\n self.attention_to_visualize = []\n self.selections_to_visualize = {}\n return r\n\n def dump_logs(self, save_dir: str):\n if torch.is_tensor(self.sel_correlation):\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.sel_correlation, os.path.join(save_dir, \"sel_correlation.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if torch.is_tensor(self.sel_correlation):\n coo = self.sel_correlation / self.sel_correlation.flatten(1).sum(-1).clamp(min=1)[:, None, None]\n for h in range(self.n_heads):\n res[f\"expert_coocurence_{h}\"] = framework.visualize.plot.Heatmap(coo[h], xlabel=\"o expert\", ylabel=\"v expert\", textval=False)\n self.sel_correlation = 0\n return res" } ]
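The snippets above share one routing pattern: a linear selection head scores all experts, a sigmoid (or softmax) turns the scores into gates, only the top-k experts run, and their outputs are mixed by the gate values. Below is a minimal dense sketch of that pattern, assuming illustrative tensor names and skipping the cvmm sparse kernels, expert dropout, and all regularization:

import torch
import torch.nn.functional as F

def moe_ffn_dense(x, expert_sel, keys, values, k=4):
    # x: [batch, d_model]; expert_sel: [n_experts, d_model]
    # keys: [n_experts, d_model, expert_size]; values: [n_experts, expert_size, d_model]
    logits = F.linear(x, expert_sel)                       # raw selection scores, [batch, n_experts]
    gate_val, idx = torch.sigmoid(logits).topk(k, dim=-1)  # "sigmoid" selection mode, keep top-k
    out = torch.zeros_like(x)
    for h in range(k):
        e = idx[:, h]                                      # chosen expert per token
        hidden = F.relu(torch.einsum("bd,bde->be", x, keys[e]))
        out = out + gate_val[:, h:h + 1] * torch.einsum("be,bed->bd", hidden, values[e])
    return out

The production layer dispatches tokens with the custom cvmm kernel instead of densely gathering per-token expert weights, and adds the usage logging and perplexity/entropy regularization shown in the MoE snippet.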
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer from layers.moe_layer import MoE from interfaces import Result from layers import LayerVisualizer from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
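task and args, imported here from tasks/task_db.py, implement a registry: @task() stores the decorated class in the global TASKS dict under a snake_case name derived from the class name, and @args queues callbacks that add CLI options to the shared parser. A hypothetical usage sketch (MyLMTask, my_task_args, and the option name are made up for illustration):

from tasks.task_db import task, args, TASKS, ARGS_REGISTERS

@task()                    # stored under TASK_PREFIX + "my_lm_task"
class MyLMTask:
    pass

@args
def my_task_args(parser):  # queued; the framework is assumed to call it before parsing argv
    parser.add_argument("-my_task.hidden_size", default=512)

assert any(name.endswith("my_lm_task") for name in TASKS)
assert my_task_args in ARGS_REGISTERS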
19108
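The -lm.trafo.* options registered below control the Transformer-XL-style state carry-over implemented by TransformerLanguageModel above: activations of up to context_blocks previous chunks are attended to, and same-length evaluation extends the causal mask with a history mask over the oldest block so every query sees a constant-length window. A minimal sketch of that mask construction, assuming at least one stored block and the convention True = masked:

import torch

def history_causal_mask(q_len: int, n_ctx_blocks: int) -> torch.Tensor:
    causal = torch.ones(q_len, q_len, dtype=torch.bool).triu(1)              # current chunk
    hist_first = torch.tril(torch.ones(q_len, q_len, dtype=torch.bool),
                            diagonal=-1)                                     # same-length window
    mid = torch.zeros(q_len, q_len * (n_ctx_blocks - 1), dtype=torch.bool)   # fully visible blocks
    return torch.cat([hist_first, mid, causal], dim=-1)                      # [q_len, (n_ctx_blocks + 1) * q_len]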
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
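    # Remaining mixture-of-experts attention options: value-projection size,
    # shared experts, expert dropout, separate k/q selection, RoPE settings,
    # MoA auxiliary loss weights, and plotting. "none" defaults are converted
    # to Python None by the int_or_none_parser/float_or_none_parser helpers.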
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128)
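Many of the options above default to the literal string "none" and pass a converter such as `parser.int_or_none_parser` or `parser.float_or_none_parser`. Those helpers are not included in this record, so the following is a hypothetical stand-in that illustrates the convention (the name and behaviour are assumed, not taken from the framework):

from typing import Optional

def int_or_none_parser(value: str) -> Optional[int]:
    # Hypothetical sketch of an int_or_none_parser-style converter:
    # the literal string "none" maps to None, anything else must parse as int.
    if value.lower() == "none":
        return None
    return int(value)

assert int_or_none_parser("none") is None
assert int_or_none_parser("128") == 128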
@task()
1
2023-12-13 08:45:02+00:00
24k
AIFSH/NativeDancer
nativedancer/third_part/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "nativedancer/third_part/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\n \"\"\"\n Decorate a function or a class's __init__ method so that it can be called\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\n :class:`CfgNode` to arguments.\n\n Examples:\n ::\n # Usage 1: Decorator on __init__:\n class A:\n @configurable\n def __init__(self, a, b=2, c=3):\n pass\n\n @classmethod\n def from_config(cls, cfg): # 'cfg' must be the first argument\n # Returns kwargs to be passed to __init__\n return {\"a\": cfg.A, \"b\": cfg.B}\n\n a1 = A(a=1, b=2) # regular construction\n a2 = A(cfg) # construct with a cfg\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\n\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\n def a_func(a, b=2, c=3):\n pass\n\n a1 = a_func(a=1, b=2) # regular call\n a2 = a_func(cfg) # call with a cfg\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\n\n Args:\n init_func (callable): a class's ``__init__`` method in usage 1. The\n class must have a ``from_config`` classmethod which takes `cfg` as\n the first argument.\n from_config (callable): the from_config function in usage 2. It must take `cfg`\n as its first argument.\n \"\"\"\n\n if init_func is not None:\n assert (\n inspect.isfunction(init_func)\n and from_config is None\n and init_func.__name__ == \"__init__\"\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\n\n @functools.wraps(init_func)\n def wrapped(self, *args, **kwargs):\n try:\n from_config_func = type(self).from_config\n except AttributeError as e:\n raise AttributeError(\n \"Class with @configurable must have a 'from_config' classmethod.\"\n ) from e\n if not inspect.ismethod(from_config_func):\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\n\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\n init_func(self, **explicit_args)\n else:\n init_func(self, *args, **kwargs)\n\n return wrapped\n\n else:\n if from_config is None:\n return configurable # @configurable() is made equivalent to @configurable\n assert inspect.isfunction(\n from_config\n ), \"from_config argument of configurable must be a function!\"\n\n def wrapper(orig_func):\n @functools.wraps(orig_func)\n def wrapped(*args, **kwargs):\n if _called_with_cfg(*args, **kwargs):\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\n return orig_func(**explicit_args)\n else:\n return orig_func(*args, **kwargs)\n\n wrapped.from_config = from_config\n return wrapped\n\n return wrapper" }, { "identifier": "get_norm", "path": "nativedancer/third_part/detectron2/layers/batch_norm.py", "snippet": "def get_norm(norm, out_channels):\n \"\"\"\n Args:\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\n or a callable that takes a channel number and returns\n the normalization layer as a nn.Module.\n\n Returns:\n nn.Module or None: the normalization layer\n \"\"\"\n if norm is None:\n return None\n if isinstance(norm, str):\n if len(norm) == 0:\n return None\n norm = {\n \"BN\": BatchNorm2d,\n # Fixed in https://github.com/pytorch/pytorch/pull/36382\n \"SyncBN\": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,\n \"FrozenBN\": FrozenBatchNorm2d,\n \"GN\": lambda channels: nn.GroupNorm(32, channels),\n # 
for debugging:\n \"nnSyncBN\": nn.SyncBatchNorm,\n \"naiveSyncBN\": NaiveSyncBatchNorm,\n # expose stats_mode N as an option to caller, required for zero-len inputs\n \"naiveSyncBN_N\": lambda channels: NaiveSyncBatchNorm(channels, stats_mode=\"N\"),\n \"LN\": lambda channels: LayerNorm(channels),\n }[norm]\n return norm(out_channels)" }, { "identifier": "CycleBatchNormList", "path": "nativedancer/third_part/detectron2/layers/batch_norm.py", "snippet": "class CycleBatchNormList(nn.ModuleList):\n \"\"\"\n Implement domain-specific BatchNorm by cycling.\n\n When a BatchNorm layer is used for multiple input domains or input\n features, it might need to maintain a separate test-time statistics\n for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.\n\n This module implements it by using N separate BN layers\n and it cycles through them every time a forward() is called.\n\n NOTE: The caller of this module MUST guarantee to always call\n this module by multiple of N times. Otherwise its test-time statistics\n will be incorrect.\n \"\"\"\n\n def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):\n \"\"\"\n Args:\n length: number of BatchNorm layers to cycle.\n bn_class: the BatchNorm class to use\n kwargs: arguments of the BatchNorm class, such as num_features.\n \"\"\"\n self._affine = kwargs.pop(\"affine\", True)\n super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])\n if self._affine:\n # shared affine, domain-specific BN\n channels = self[0].num_features\n self.weight = nn.Parameter(torch.ones(channels))\n self.bias = nn.Parameter(torch.zeros(channels))\n self._pos = 0\n\n def forward(self, x):\n ret = self[self._pos](x)\n self._pos = (self._pos + 1) % len(self)\n\n if self._affine:\n w = self.weight.reshape(1, -1, 1, 1)\n b = self.bias.reshape(1, -1, 1, 1)\n return ret * w + b\n else:\n return ret\n\n def extra_repr(self):\n return f\"affine={self._affine}\"" }, { "identifier": "batched_nms", "path": "nativedancer/third_part/detectron2/layers/nms.py", "snippet": "def batched_nms(\n boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float\n):\n \"\"\"\n Same as torchvision.ops.boxes.batched_nms, but with float().\n \"\"\"\n assert boxes.shape[-1] == 4\n # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)\n # to decide whether to use coordinate trick or for loop to implement batched_nms. 
So we\n # just call it directly.\n # Fp16 does not have enough range for batched NMS, so adding float().\n return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)" }, { "identifier": "ShapeSpec", "path": "nativedancer/third_part/detectron2/layers/shape_spec.py", "snippet": "class ShapeSpec:\n \"\"\"\n A simple structure that contains basic shape specification about a tensor.\n It is often used as the auxiliary inputs/outputs of models,\n to complement the lack of shape inference ability among pytorch modules.\n \"\"\"\n\n channels: Optional[int] = None\n height: Optional[int] = None\n width: Optional[int] = None\n stride: Optional[int] = None" }, { "identifier": "cat", "path": "nativedancer/third_part/detectron2/layers/wrappers.py", "snippet": "def cat(tensors: List[torch.Tensor], dim: int = 0):\n \"\"\"\n Efficient version of torch.cat that avoids a copy if there is only a single element in a list\n \"\"\"\n assert isinstance(tensors, (list, tuple))\n if len(tensors) == 1:\n return tensors[0]\n return torch.cat(tensors, dim)" }, { "identifier": "Boxes", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\n else:\n tensor = tensor.to(torch.float32)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n 
(False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "pairwise_iou", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "identifier": "ImageList", "path": "nativedancer/third_part/detectron2/structures/image_list.py", "snippet": "class ImageList:\n \"\"\"\n Structure that holds a list of images (of possibly\n varying sizes) as a single tensor.\n This works by padding the images to the same size.\n The original sizes of each image is stored in `image_sizes`.\n\n Attributes:\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\n During tracing, it becomes list[Tensor] instead.\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\n \"\"\"\n Arguments:\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\n be smaller than (H, W) due to padding.\n \"\"\"\n self.tensor = tensor\n self.image_sizes = image_sizes\n\n def __len__(self) -> int:\n return len(self.image_sizes)\n\n def __getitem__(self, idx) -> torch.Tensor:\n \"\"\"\n Access the individual image in its original size.\n\n Args:\n idx: int or slice\n\n Returns:\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\n \"\"\"\n size = self.image_sizes[idx]\n return self.tensor[idx, ..., : size[0], : size[1]]\n\n @torch.jit.unused\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\n cast_tensor = self.tensor.to(*args, **kwargs)\n return ImageList(cast_tensor, self.image_sizes)\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n @staticmethod\n def from_tensors(\n tensors: List[torch.Tensor],\n size_divisibility: int = 0,\n pad_value: float = 0.0,\n padding_constraints: Optional[Dict[str, int]] = None,\n ) -> \"ImageList\":\n \"\"\"\n Args:\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\n (C_1, ..., C_K, Hi, Wi) where K >= 1. 
The Tensors will be padded\n to the same shape with `pad_value`.\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\n the common height and width is divisible by `size_divisibility`.\n This depends on the model and many models need a divisibility of 32.\n pad_value (float): value to pad.\n padding_constraints (optional[Dict]): If given, it would follow the format as\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\n overwrite the above one if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n Returns:\n an `ImageList`.\n \"\"\"\n assert len(tensors) > 0\n assert isinstance(tensors, (tuple, list))\n for t in tensors:\n assert isinstance(t, torch.Tensor), type(t)\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\n\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\n max_size = torch.stack(image_sizes_tensor).max(0).values\n\n if padding_constraints is not None:\n square_size = padding_constraints.get(\"square_size\", 0)\n if square_size > 0:\n # pad to square.\n max_size[0] = max_size[1] = square_size\n if \"size_divisibility\" in padding_constraints:\n size_divisibility = padding_constraints[\"size_divisibility\"]\n if size_divisibility > 1:\n stride = size_divisibility\n # the last two dims are H,W, both subject to divisibility requirement\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\n\n # handle weirdness of scripting and tracing ...\n if torch.jit.is_scripting():\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\n else:\n if torch.jit.is_tracing():\n image_sizes = image_sizes_tensor\n\n if len(tensors) == 1:\n # This seems slightly (2%) faster.\n # TODO: check whether it's faster for multiple images as well\n image_size = image_sizes[0]\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\n else:\n # max_size can be a tensor in tracing mode, therefore convert to list\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\n device = (\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\n )\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\n batched_imgs = move_device_like(batched_imgs, tensors[0])\n for i, img in enumerate(tensors):\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\n # Tracing mode cannot capture `copy_()` of temporary locals\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\n\n return ImageList(batched_imgs.contiguous(), image_sizes)" }, { "identifier": "Instances", "path": "nativedancer/third_part/detectron2/structures/instances.py", "snippet": "class Instances:\n \"\"\"\n This class represents a list of instances in an image.\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\n All fields must have the same ``__len__`` which is the number of instances.\n\n All other (non-field) attributes of this class are considered private:\n they must start with '_' and are not modifiable by a user.\n\n Some basic usage:\n\n 1. Set/get/check a field:\n\n .. code-block:: python\n\n instances.gt_boxes = Boxes(...)\n print(instances.pred_masks) # a tensor of shape (N, H, W)\n print('gt_masks' in instances)\n\n 2. ``len(instances)`` returns the number of instances\n 3. 
Indexing: ``instances[indices]`` will apply the indexing on all the fields\n and returns a new :class:`Instances`.\n Typically, ``indices`` is a integer vector of indices,\n or a binary mask of length ``num_instances``\n\n .. code-block:: python\n\n category_3_detections = instances[instances.pred_classes == 3]\n confident_detections = instances[instances.scores > 0.9]\n \"\"\"\n\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n \"\"\"\n Args:\n image_size (height, width): the spatial size of the image.\n kwargs: fields to add to this `Instances`.\n \"\"\"\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)\n\n @property\n def image_size(self) -> Tuple[int, int]:\n \"\"\"\n Returns:\n tuple: height, width\n \"\"\"\n return self._image_size\n\n def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)\n\n def __getattr__(self, name: str) -> Any:\n if name == \"_fields\" or name not in self._fields:\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\n return self._fields[name]\n\n def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n with warnings.catch_warnings(record=True):\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value\n\n def has(self, name: str) -> bool:\n \"\"\"\n Returns:\n bool: whether the field called `name` exists.\n \"\"\"\n return name in self._fields\n\n def remove(self, name: str) -> None:\n \"\"\"\n Remove the field called `name`.\n \"\"\"\n del self._fields[name]\n\n def get(self, name: str) -> Any:\n \"\"\"\n Returns the field called `name`.\n \"\"\"\n return self._fields[name]\n\n def get_fields(self) -> Dict[str, Any]:\n \"\"\"\n Returns:\n dict: a dict which maps names (str) to data of the fields\n\n Modifying the returned dict will modify this instance.\n \"\"\"\n return self._fields\n\n # Tensor-like methods\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\n \"\"\"\n Returns:\n Instances: all fields are called with a `to(device)`, if the field has this method.\n \"\"\"\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n if hasattr(v, \"to\"):\n v = v.to(*args, **kwargs)\n ret.set(k, v)\n return ret\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n \"\"\"\n Args:\n item: an index-like object and will be used to index all the fields.\n\n Returns:\n If `item` is a string, return the data in the corresponding field.\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\n \"\"\"\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret\n\n def __len__(self) -> int:\n for v in self._fields.values():\n # use __len__ because len() has to be int and is not friendly to tracing\n return v.__len__()\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\n\n def __iter__(self):\n raise NotImplementedError(\"`Instances` object is not 
iterable!\")\n\n @staticmethod\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\n \"\"\"\n Args:\n instance_lists (list[Instances])\n\n Returns:\n Instances\n \"\"\"\n assert all(isinstance(i, Instances) for i in instance_lists)\n assert len(instance_lists) > 0\n if len(instance_lists) == 1:\n return instance_lists[0]\n\n image_size = instance_lists[0].image_size\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\n for i in instance_lists[1:]:\n assert i.image_size == image_size\n ret = Instances(image_size)\n for k in instance_lists[0]._fields.keys():\n values = [i.get(k) for i in instance_lists]\n v0 = values[0]\n if isinstance(v0, torch.Tensor):\n values = torch.cat(values, dim=0)\n elif isinstance(v0, list):\n values = list(itertools.chain(*values))\n elif hasattr(type(v0), \"cat\"):\n values = type(v0).cat(values)\n else:\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\n ret.set(k, values)\n return ret\n\n def __str__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={}, \".format(len(self))\n s += \"image_height={}, \".format(self._image_size[0])\n s += \"image_width={}, \".format(self._image_size[1])\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\n return s\n\n __repr__ = __str__" }, { "identifier": "get_event_storage", "path": "nativedancer/third_part/detectron2/utils/events.py", "snippet": "def get_event_storage():\n \"\"\"\n Returns:\n The :class:`EventStorage` object that's currently being used.\n Throws an error if no :class:`EventStorage` is currently enabled.\n \"\"\"\n assert len(\n _CURRENT_STORAGE_STACK\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\n return _CURRENT_STORAGE_STACK[-1]" }, { "identifier": "build_anchor_generator", "path": "nativedancer/third_part/detectron2/modeling/anchor_generator.py", "snippet": "def build_anchor_generator(cfg, input_shape):\n \"\"\"\n Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.\n \"\"\"\n anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME\n return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)" }, { "identifier": "build_backbone", "path": "nativedancer/third_part/detectron2/modeling/backbone/build.py", "snippet": "def build_backbone(cfg, input_shape=None):\n \"\"\"\n Build a backbone from `cfg.MODEL.BACKBONE.NAME`.\n\n Returns:\n an instance of :class:`Backbone`\n \"\"\"\n if input_shape is None:\n input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))\n\n backbone_name = cfg.MODEL.BACKBONE.NAME\n backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)\n assert isinstance(backbone, Backbone)\n return backbone" }, { "identifier": "Backbone", "path": "nativedancer/third_part/detectron2/modeling/backbone/backbone.py", "snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of arguments.\n \"\"\"\n super().__init__()\n\n @abstractmethod\n def forward(self):\n \"\"\"\n Subclasses must override this method, but adhere to the same return type.\n\n Returns:\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\n \"\"\"\n pass\n\n @property\n def size_divisibility(self) -> int:\n \"\"\"\n Some backbones require the input height and width to be divisible by a\n specific integer. 
This is typically true for encoder / decoder type networks\n with lateral connection (e.g., FPN) for which feature maps need to match\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\n input size divisibility is required.\n \"\"\"\n return 0\n\n @property\n def padding_constraints(self) -> Dict[str, int]:\n \"\"\"\n This property is a generalization of size_divisibility. Some backbones and training\n recipes require specific padding constraints, such as enforcing divisibility by a specific\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\n in :paper:vitdet). `padding_constraints` contains these optional items like:\n {\n \"size_divisibility\": int,\n \"square_size\": int,\n # Future options are possible\n }\n `size_divisibility` will read from here if presented and `square_size` indicates the\n square padding size if `square_size` > 0.\n\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\n \"\"\"\n return {}\n\n def output_shape(self):\n \"\"\"\n Returns:\n dict[str->ShapeSpec]\n \"\"\"\n # this is a backward-compatible default\n return {\n name: ShapeSpec(\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\n )\n for name in self._out_features\n }" }, { "identifier": "Box2BoxTransform", "path": "nativedancer/third_part/detectron2/modeling/box_regression.py", "snippet": "class Box2BoxTransform:\n \"\"\"\n The box-to-box transform defined in R-CNN. The transformation is parameterized\n by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height\n by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).\n \"\"\"\n\n def __init__(\n self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP\n ):\n \"\"\"\n Args:\n weights (4-element tuple): Scaling factors that are applied to the\n (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set\n such that the deltas have unit variance; now they are treated as\n hyperparameters of the system.\n scale_clamp (float): When predicting deltas, the predicted box scaling\n factors (dw and dh) are clamped such that they are <= scale_clamp.\n \"\"\"\n self.weights = weights\n self.scale_clamp = scale_clamp\n\n def get_deltas(self, src_boxes, target_boxes):\n \"\"\"\n Get box regression transformation deltas (dx, dy, dw, dh) that can be used\n to transform the `src_boxes` into the `target_boxes`. 
That is, the relation\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless\n any delta is too large and is clamped).\n\n Args:\n src_boxes (Tensor): source boxes, e.g., object proposals\n target_boxes (Tensor): target of the transformation, e.g., ground-truth\n boxes.\n \"\"\"\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\n\n src_widths = src_boxes[:, 2] - src_boxes[:, 0]\n src_heights = src_boxes[:, 3] - src_boxes[:, 1]\n src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths\n src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights\n\n target_widths = target_boxes[:, 2] - target_boxes[:, 0]\n target_heights = target_boxes[:, 3] - target_boxes[:, 1]\n target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths\n target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights\n\n wx, wy, ww, wh = self.weights\n dx = wx * (target_ctr_x - src_ctr_x) / src_widths\n dy = wy * (target_ctr_y - src_ctr_y) / src_heights\n dw = ww * torch.log(target_widths / src_widths)\n dh = wh * torch.log(target_heights / src_heights)\n\n deltas = torch.stack((dx, dy, dw, dh), dim=1)\n assert (src_widths > 0).all().item(), \"Input boxes to Box2BoxTransform are not valid!\"\n return deltas\n\n def apply_deltas(self, deltas, boxes):\n \"\"\"\n Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.\n\n Args:\n deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.\n deltas[i] represents k potentially different class-specific\n box transformations for the single box boxes[i].\n boxes (Tensor): boxes to transform, of shape (N, 4)\n \"\"\"\n deltas = deltas.float() # ensure fp32 for decoding precision\n boxes = boxes.to(deltas.dtype)\n\n widths = boxes[:, 2] - boxes[:, 0]\n heights = boxes[:, 3] - boxes[:, 1]\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n\n wx, wy, ww, wh = self.weights\n dx = deltas[:, 0::4] / wx\n dy = deltas[:, 1::4] / wy\n dw = deltas[:, 2::4] / ww\n dh = deltas[:, 3::4] / wh\n\n # Prevent sending too large values into torch.exp()\n dw = torch.clamp(dw, max=self.scale_clamp)\n dh = torch.clamp(dh, max=self.scale_clamp)\n\n pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\n pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\n pred_w = torch.exp(dw) * widths[:, None]\n pred_h = torch.exp(dh) * heights[:, None]\n\n x1 = pred_ctr_x - 0.5 * pred_w\n y1 = pred_ctr_y - 0.5 * pred_h\n x2 = pred_ctr_x + 0.5 * pred_w\n y2 = pred_ctr_y + 0.5 * pred_h\n pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)\n return pred_boxes.reshape(deltas.shape)" }, { "identifier": "_dense_box_regression_loss", "path": "nativedancer/third_part/detectron2/modeling/box_regression.py", "snippet": "def _dense_box_regression_loss(\n anchors: List[Union[Boxes, torch.Tensor]],\n box2box_transform: Box2BoxTransform,\n pred_anchor_deltas: List[torch.Tensor],\n gt_boxes: List[torch.Tensor],\n fg_mask: torch.Tensor,\n box_reg_loss_type=\"smooth_l1\",\n smooth_l1_beta=0.0,\n):\n \"\"\"\n Compute loss for dense multi-level box regression.\n Loss is accumulated over ``fg_mask``.\n\n Args:\n anchors: #lvl anchor boxes, each is (HixWixA, 4)\n pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)\n gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))\n fg_mask: the foreground boolean mask of shape (N, R) to compute loss on\n box_reg_loss_type (str): Loss type to use. 
Supported losses: \"smooth_l1\", \"giou\",\n \"diou\", \"ciou\".\n smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to\n use L1 loss. Only used when `box_reg_loss_type` is \"smooth_l1\"\n \"\"\"\n if isinstance(anchors[0], Boxes):\n anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)\n else:\n anchors = cat(anchors)\n if box_reg_loss_type == \"smooth_l1\":\n gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]\n gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)\n loss_box_reg = smooth_l1_loss(\n cat(pred_anchor_deltas, dim=1)[fg_mask],\n gt_anchor_deltas[fg_mask],\n beta=smooth_l1_beta,\n reduction=\"sum\",\n )\n elif box_reg_loss_type == \"giou\":\n pred_boxes = [\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\n ]\n loss_box_reg = giou_loss(\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\n )\n elif box_reg_loss_type == \"diou\":\n pred_boxes = [\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\n ]\n loss_box_reg = diou_loss(\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\n )\n elif box_reg_loss_type == \"ciou\":\n pred_boxes = [\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\n ]\n loss_box_reg = ciou_loss(\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\n )\n else:\n raise ValueError(f\"Invalid dense box regression loss type '{box_reg_loss_type}'\")\n return loss_box_reg" }, { "identifier": "Matcher", "path": "nativedancer/third_part/detectron2/modeling/matcher.py", "snippet": "class Matcher:\n \"\"\"\n This class assigns to each predicted \"element\" (e.g., a box) a ground-truth\n element. Each predicted element will have exactly zero or one matches; each\n ground-truth element may be matched to zero or more predicted elements.\n\n The matching is determined by the MxN match_quality_matrix, that characterizes\n how well each (ground-truth, prediction)-pair match each other. For example,\n if the elements are boxes, this matrix may contain box intersection-over-union\n overlap values.\n\n The matcher returns (a) a vector of length N containing the index of the\n ground-truth element m in [0, M) that matches to prediction n in [0, N).\n (b) a vector of length N containing the labels for each prediction.\n \"\"\"\n\n def __init__(\n self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False\n ):\n \"\"\"\n Args:\n thresholds (list): a list of thresholds used to stratify predictions\n into levels.\n labels (list): a list of values to label predictions belonging at\n each level. 
A label can be one of {-1, 0, 1} signifying\n {ignore, negative class, positive class}, respectively.\n allow_low_quality_matches (bool): if True, produce additional matches\n for predictions with maximum match quality lower than high_threshold.\n See set_low_quality_matches_ for more details.\n\n For example,\n thresholds = [0.3, 0.5]\n labels = [0, -1, 1]\n All predictions with iou < 0.3 will be marked with 0 and\n thus will be considered as false positives while training.\n All predictions with 0.3 <= iou < 0.5 will be marked with -1 and\n thus will be ignored.\n All predictions with 0.5 <= iou will be marked with 1 and\n thus will be considered as true positives.\n \"\"\"\n # Add -inf and +inf to first and last position in thresholds\n thresholds = thresholds[:]\n assert thresholds[0] > 0\n thresholds.insert(0, -float(\"inf\"))\n thresholds.append(float(\"inf\"))\n # Currently torchscript does not support all + generator\n assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])\n assert all([l in [-1, 0, 1] for l in labels])\n assert len(labels) == len(thresholds) - 1\n self.thresholds = thresholds\n self.labels = labels\n self.allow_low_quality_matches = allow_low_quality_matches\n\n def __call__(self, match_quality_matrix):\n \"\"\"\n Args:\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the\n pairwise quality between M ground-truth elements and N predicted\n elements. All elements must be >= 0 (due to the us of `torch.nonzero`\n for selecting indices in :meth:`set_low_quality_matches_`).\n\n Returns:\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched\n ground-truth index in [0, M)\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates\n whether a prediction is a true or false positive or ignored\n \"\"\"\n assert match_quality_matrix.dim() == 2\n if match_quality_matrix.numel() == 0:\n default_matches = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), 0, dtype=torch.int64\n )\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\n # to `self.labels[0]`, which usually defaults to background class 0\n # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds\n default_match_labels = match_quality_matrix.new_full(\n (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8\n )\n return default_matches, default_match_labels\n\n assert torch.all(match_quality_matrix >= 0)\n\n # match_quality_matrix is M (gt) x N (predicted)\n # Max over gt elements (dim 0) to find best gt candidate for each prediction\n matched_vals, matches = match_quality_matrix.max(dim=0)\n\n match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\n\n for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\n low_high = (matched_vals >= low) & (matched_vals < high)\n match_labels[low_high] = l\n\n if self.allow_low_quality_matches:\n self.set_low_quality_matches_(match_labels, match_quality_matrix)\n\n return matches, match_labels\n\n def set_low_quality_matches_(self, match_labels, match_quality_matrix):\n \"\"\"\n Produce additional matches for predictions that have only low-quality matches.\n Specifically, for each ground-truth G find the set of predictions that have\n maximum overlap with it (including ties); for each prediction in that set, if\n it is unmatched, then match it to the ground-truth G.\n\n This function implements the RPN assignment case (i) in Sec. 
3.1.2 of\n :paper:`Faster R-CNN`.\n \"\"\"\n # For each gt, find the prediction with which it has highest quality\n highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)\n # Find the highest quality match available, even if it is low, including ties.\n # Note that the matches qualities must be positive due to the use of\n # `torch.nonzero`.\n _, pred_inds_with_highest_quality = nonzero_tuple(\n match_quality_matrix == highest_quality_foreach_gt[:, None]\n )\n # If an anchor was labeled positive only due to a low-quality match\n # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B.\n # This follows the implementation in Detectron, and is found to have no significant impact.\n match_labels[pred_inds_with_highest_quality] = 1" }, { "identifier": "META_ARCH_REGISTRY", "path": "nativedancer/third_part/detectron2/modeling/meta_arch/build.py", "snippet": "META_ARCH_REGISTRY = Registry(\"META_ARCH\") # noqa F401 isort:skip" }, { "identifier": "DenseDetector", "path": "nativedancer/third_part/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "class DenseDetector(nn.Module):\n \"\"\"\n Base class for dense detector. We define a dense detector as a fully-convolutional model that\n makes per-pixel (i.e. dense) predictions.\n \"\"\"\n\n def __init__(\n self,\n backbone: Backbone,\n head: nn.Module,\n head_in_features: Optional[List[str]] = None,\n *,\n pixel_mean,\n pixel_std,\n ):\n \"\"\"\n Args:\n backbone: backbone module\n head: head module\n head_in_features: backbone features to use in head. Default to all backbone features.\n pixel_mean (Tuple[float]):\n Values to be used for image normalization (BGR order).\n To train on images of different number of channels, set different mean & std.\n Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]\n pixel_std (Tuple[float]):\n When using pre-trained models in Detectron1 or any MSRA models,\n std has been absorbed into its conv1 weights, so the std needs to be set 1.\n Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\n \"\"\"\n super().__init__()\n\n self.backbone = backbone\n self.head = head\n if head_in_features is None:\n shapes = self.backbone.output_shape()\n self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride)\n else:\n self.head_in_features = head_in_features\n self.register_buffer(\"pixel_mean\", torch.tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self):\n return self.pixel_mean.device\n\n def _move_to_current_device(self, x):\n return move_device_like(x, self.pixel_mean)\n\n def forward(self, batched_inputs: List[Dict[str, Tensor]]):\n \"\"\"\n Args:\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\n Each item in the list contains the inputs for one image.\n For now, each item in the list is a dict that contains:\n\n * image: Tensor, image in (C, H, W) format.\n * instances: Instances\n\n Other information that's included in the original dicts, such as:\n\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\n See :meth:`postprocess` for details.\n\n Returns:\n In training, dict[str, Tensor]: mapping from a named loss to a tensor storing the\n loss. Used during training only. 
In inference, the standard output format, described\n in :doc:`/tutorials/models`.\n \"\"\"\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n features = [features[f] for f in self.head_in_features]\n predictions = self.head(features)\n\n if self.training:\n assert not torch.jit.is_scripting(), \"Not supported\"\n assert \"instances\" in batched_inputs[0], \"Instance annotations are missing in training!\"\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n return self.forward_training(images, features, predictions, gt_instances)\n else:\n results = self.forward_inference(images, features, predictions)\n if torch.jit.is_scripting():\n return results\n\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n return processed_results\n\n def forward_training(self, images, features, predictions, gt_instances):\n raise NotImplementedError()\n\n def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]):\n \"\"\"\n Normalize, pad and batch the input images.\n \"\"\"\n images = [self._move_to_current_device(x[\"image\"]) for x in batched_inputs]\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\n images = ImageList.from_tensors(\n images,\n self.backbone.size_divisibility,\n padding_constraints=self.backbone.padding_constraints,\n )\n return images\n\n def _transpose_dense_predictions(\n self, predictions: List[List[Tensor]], dims_per_anchor: List[int]\n ) -> List[List[Tensor]]:\n \"\"\"\n Transpose the dense per-level predictions.\n\n Args:\n predictions: a list of outputs, each is a list of per-level\n predictions with shape (N, Ai x K, Hi, Wi), where N is the\n number of images, Ai is the number of anchors per location on\n level i, K is the dimension of predictions per anchor.\n dims_per_anchor: the value of K for each predictions. e.g. 4 for\n box prediction, #classes for classification prediction.\n\n Returns:\n List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K).\n \"\"\"\n assert len(predictions) == len(dims_per_anchor)\n res: List[List[Tensor]] = []\n for pred, dim_per_anchor in zip(predictions, dims_per_anchor):\n pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]\n res.append(pred)\n return res\n\n def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9):\n \"\"\"\n Apply EMA update to `self.name` using `value`.\n\n This is mainly used for loss normalizer. In Detectron1, loss is normalized by number\n of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a\n large variance and using it lead to lower performance. 
Therefore we maintain an EMA of\n #foreground to stabilize the normalizer.\n\n Args:\n name: name of the normalizer\n value: the new value to update\n initial_value: the initial value to start with\n momentum: momentum of EMA\n\n Returns:\n float: the updated EMA value\n \"\"\"\n if hasattr(self, name):\n old = getattr(self, name)\n else:\n old = initial_value\n new = old * momentum + value * (1 - momentum)\n setattr(self, name, new)\n return new\n\n def _decode_per_level_predictions(\n self,\n anchors: Boxes,\n pred_scores: Tensor,\n pred_deltas: Tensor,\n score_thresh: float,\n topk_candidates: int,\n image_size: Tuple[int, int],\n ) -> Instances:\n \"\"\"\n Decode boxes and classification predictions of one featuer level, by\n the following steps:\n 1. filter the predictions based on score threshold and top K scores.\n 2. transform the box regression outputs\n 3. return the predicted scores, classes and boxes\n\n Args:\n anchors: Boxes, anchor for this feature level\n pred_scores: HxWxA,K\n pred_deltas: HxWxA,4\n\n Returns:\n Instances: with field \"scores\", \"pred_boxes\", \"pred_classes\".\n \"\"\"\n # Apply two filtering to make NMS faster.\n # 1. Keep boxes with confidence score higher than threshold\n keep_idxs = pred_scores > score_thresh\n pred_scores = pred_scores[keep_idxs]\n topk_idxs = torch.nonzero(keep_idxs) # Kx2\n\n # 2. Keep top k top scoring boxes only\n topk_idxs_size = topk_idxs.shape[0]\n if isinstance(topk_idxs_size, Tensor):\n # It's a tensor in tracing\n num_topk = torch.clamp(topk_idxs_size, max=topk_candidates)\n else:\n num_topk = min(topk_idxs_size, topk_candidates)\n pred_scores, idxs = pred_scores.topk(num_topk)\n topk_idxs = topk_idxs[idxs]\n\n anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1)\n\n pred_boxes = self.box2box_transform.apply_deltas(\n pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs]\n )\n return Instances(\n image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs\n )\n\n def _decode_multi_level_predictions(\n self,\n anchors: List[Boxes],\n pred_scores: List[Tensor],\n pred_deltas: List[Tensor],\n score_thresh: float,\n topk_candidates: int,\n image_size: Tuple[int, int],\n ) -> Instances:\n \"\"\"\n Run `_decode_per_level_predictions` for all feature levels and concat the results.\n \"\"\"\n predictions = [\n self._decode_per_level_predictions(\n anchors_i,\n box_cls_i,\n box_reg_i,\n score_thresh,\n topk_candidates,\n image_size,\n )\n # Iterate over every feature level\n for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors)\n ]\n return predictions[0].cat(predictions) # 'Instances.cat' is not scriptale but this is\n\n def visualize_training(self, batched_inputs, results):\n \"\"\"\n A function used to visualize ground truth images and final network predictions.\n It shows ground truth bounding boxes on the original image and up to 20\n predicted object bounding boxes on the original image.\n\n Args:\n batched_inputs (list): a list that contains input to the model.\n results (List[Instances]): a list of #images elements returned by forward_inference().\n \"\"\"\n from detectron2.utils.visualizer import Visualizer\n\n assert len(batched_inputs) == len(\n results\n ), \"Cannot visualize inputs and results of different sizes\"\n storage = get_event_storage()\n max_boxes = 20\n\n image_index = 0 # only visualize a single image\n img = batched_inputs[image_index][\"image\"]\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\n v_gt = Visualizer(img, None)\n v_gt = 
v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\n anno_img = v_gt.get_image()\n processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\n\n v_pred = Visualizer(img, None)\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])\n prop_img = v_pred.get_image()\n vis_img = np.vstack((anno_img, prop_img))\n vis_img = vis_img.transpose(2, 0, 1)\n vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\n storage.put_image(vis_name, vis_img)" }, { "identifier": "permute_to_N_HWA_K", "path": "nativedancer/third_part/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "def permute_to_N_HWA_K(tensor, K: int):\n \"\"\"\n Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K)\n \"\"\"\n assert tensor.dim() == 4, tensor.shape\n N, _, H, W = tensor.shape\n tensor = tensor.view(N, -1, K, H, W)\n tensor = tensor.permute(0, 3, 4, 1, 2)\n tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)\n return tensor" } ]
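Among the context snippets above, `pairwise_iou` and `Matcher` are the two utilities RetinaNet combines to label anchors against ground truth. A minimal sketch of that interaction, using the threshold/label convention given in the `Matcher` docstring above and assuming a regular detectron2 install (the vendored `nativedancer.third_part` copy mirrors the same API):

import torch
from detectron2.modeling.matcher import Matcher
from detectron2.structures import Boxes, pairwise_iou

# Two ground-truth boxes and three anchors, each (x1, y1, x2, y2).
gt = Boxes(torch.tensor([[0.0, 0.0, 10.0, 10.0], [20.0, 20.0, 30.0, 30.0]]))
anchors = Boxes(torch.tensor([[0.0, 0.0, 9.0, 9.0],
                              [18.0, 18.0, 28.0, 28.0],
                              [50.0, 50.0, 60.0, 60.0]]))

# MxN IoU matrix: M ground-truth rows, N anchor columns.
iou = pairwise_iou(gt, anchors)

# iou < 0.3 -> 0 (negative), 0.3 <= iou < 0.5 -> -1 (ignored), iou >= 0.5 -> 1 (positive);
# allow_low_quality_matches upgrades each gt's best anchor to positive even below 0.5.
matcher = Matcher(thresholds=[0.3, 0.5], labels=[0, -1, 1], allow_low_quality_matches=True)
matches, match_labels = matcher(iou)
print(matches, match_labels)  # per-anchor gt index and per-anchor {-1, 0, 1} label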
import logging
import math
import torch
from typing import List, Tuple
from fvcore.nn import sigmoid_focal_loss_jit
from torch import Tensor, nn
from torch.nn import functional as F

from ...config import configurable
from ...layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm
from ...structures import Boxes, ImageList, Instances, pairwise_iou
from ...utils.events import get_event_storage
from ..anchor_generator import build_anchor_generator
from ..backbone import Backbone, build_backbone
from ..box_regression import Box2BoxTransform, _dense_box_regression_loss
from ..matcher import Matcher
from .build import META_ARCH_REGISTRY
from .dense_detector import DenseDetector, permute_to_N_HWA_K  # noqa
16,303
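The RetinaNet code reproduced below breaks off inside `losses()`, right after counting positive anchors. Judging from the `sigmoid_focal_loss_jit` import above, the classification term that follows in the full file is a sigmoid focal loss; the following standalone sketch implements the published formula for illustration, and is not the file's actual continuation:

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits: torch.Tensor, targets: torch.Tensor,
                       alpha: float = 0.25, gamma: float = 2.0) -> torch.Tensor:
    # Per-element focal loss FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)
    # for binary targets in {0, 1}; logits and targets must share a shape.
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)   # probability of the true class
    loss = ce * (1 - p_t) ** gamma                # down-weight easy examples
    if alpha >= 0:
        loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
    return loss

# R anchors, K classes; background anchors keep an all-zero one-hot row,
# mirroring how RetinaNet encodes gt_label == num_classes as background.
R, K = 5, 80
logits = torch.randn(R, K)
targets = torch.zeros(R, K)
targets[0, 3] = 1.0  # anchor 0 is a positive for class 3
loss_cls = sigmoid_focal_loss(logits, targets).sum() / max(1.0, targets.sum().item())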
            backbone: a backbone module, must follow detectron2's backbone interface
            head (nn.Module): a module that predicts logits and regression deltas
                for each level from a list of per-level features
            head_in_features (Tuple[str]): Names of the input feature maps to be used in head
            anchor_generator (nn.Module): a module that creates anchors from a
                list of features. Usually an instance of :class:`AnchorGenerator`
            box2box_transform (Box2BoxTransform): defines the transform from anchor
                boxes to instance boxes
            anchor_matcher (Matcher): label the anchors by matching them with ground truth.
            num_classes (int): number of classes. Used to label background proposals.

            # Loss parameters:
            focal_loss_alpha (float): focal_loss_alpha
            focal_loss_gamma (float): focal_loss_gamma
            smooth_l1_beta (float): smooth_l1_beta
            box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou"

            # Inference parameters:
            test_score_thresh (float): Inference cls score threshold, only anchors with
                score > INFERENCE_TH are considered for inference (to improve speed)
            test_topk_candidates (int): Select topk candidates before NMS
            test_nms_thresh (float): Overlap threshold used for non-maximum suppression
                (suppress boxes with IoU >= this threshold)
            max_detections_per_image (int):
                Maximum number of detections to return per image during inference
                (100 is based on the limit established for the COCO dataset).
            pixel_mean, pixel_std: see :class:`DenseDetector`.
        """
        super().__init__(
            backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std
        )
        self.num_classes = num_classes

        # Anchors
        self.anchor_generator = anchor_generator
        self.box2box_transform = box2box_transform
        self.anchor_matcher = anchor_matcher

        # Loss parameters:
        self.focal_loss_alpha = focal_loss_alpha
        self.focal_loss_gamma = focal_loss_gamma
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type

        # Inference parameters:
        self.test_score_thresh = test_score_thresh
        self.test_topk_candidates = test_topk_candidates
        self.test_nms_thresh = test_nms_thresh
        self.max_detections_per_image = max_detections_per_image

        # Vis parameters
        self.vis_period = vis_period
        self.input_format = input_format

    @classmethod
    def from_config(cls, cfg):
        backbone = build_backbone(cfg)
        backbone_shape = backbone.output_shape()
        feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES]
        head = RetinaNetHead(cfg, feature_shapes)
        anchor_generator = build_anchor_generator(cfg, feature_shapes)
        return {
            "backbone": backbone,
            "head": head,
            "anchor_generator": anchor_generator,
            "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS),
            "anchor_matcher": Matcher(
                cfg.MODEL.RETINANET.IOU_THRESHOLDS,
                cfg.MODEL.RETINANET.IOU_LABELS,
                allow_low_quality_matches=True,
            ),
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES,
            "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES,
            # Loss parameters:
            "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA,
            "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA,
            "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA,
            "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE,
            # Inference parameters:
            "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST,
            "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST,
            "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST,
            "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
            # Vis parameters
            "vis_period": cfg.VIS_PERIOD,
            "input_format": cfg.INPUT.FORMAT,
        }

    def forward_training(self, images, features, predictions, gt_instances):
        # Transpose the Hi*Wi*A dimension to the middle:
        pred_logits, pred_anchor_deltas = self._transpose_dense_predictions(
            predictions, [self.num_classes, 4]
        )
        anchors = self.anchor_generator(features)
        gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances)
        return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes)

    def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes):
        """
        Args:
            anchors (list[Boxes]): a list of #feature level Boxes
            gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`.
                Their shapes are (N, R) and (N, R, 4), respectively, where R is
                the total number of anchors across levels, i.e. sum(Hi x Wi x Ai)
            pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the
                list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4),
                where K is the number of classes used in `pred_logits`.

        Returns:
            dict[str, Tensor]:
                mapping from a named loss to a scalar tensor storing the loss.
                Used during training only. The dict keys are: "loss_cls" and "loss_box_reg"
        """
        num_images = len(gt_labels)
        gt_labels = torch.stack(gt_labels)  # (N, R)

        valid_mask = gt_labels >= 0
        pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
        num_pos_anchors = pos_mask.sum().item()
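The crop above stops just as `losses` begins counting positive anchors (the full file follows in the record's all_code field). As a hedged sketch, not this record's verbatim continuation: the imports visible above (`sigmoid_focal_loss_jit`, `cat`, `_dense_box_regression_loss`) suggest the method plausibly finishes along these lines; the simple `max(num_pos_anchors, 1)` normalizer here is an assumption.

        # Sketch (assumed continuation): focal loss over all valid anchors,
        # box regression over positives, both normalized by the positive count.
        normalizer = max(num_pos_anchors, 1)  # assumption: plain clamp, no EMA smoothing

        gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[
            :, :-1
        ]  # drop the background column
        loss_cls = sigmoid_focal_loss_jit(
            cat(pred_logits, dim=1)[valid_mask],
            gt_labels_target.to(pred_logits[0].dtype),
            alpha=self.focal_loss_alpha,
            gamma=self.focal_loss_gamma,
            reduction="sum",
        )
        loss_box_reg = _dense_box_regression_loss(
            anchors,
            self.box2box_transform,
            pred_anchor_deltas,
            gt_boxes,
            pos_mask,
            box_reg_loss_type=self.box_reg_loss_type,
            smooth_l1_beta=self.smooth_l1_beta,
        )
        return {"loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer}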
# Copyright (c) Facebook, Inc. and its affiliates.

__all__ = ["RetinaNet"]

logger = logging.getLogger(__name__)


@META_ARCH_REGISTRY.register()
class RetinaNet(DenseDetector):
    """
    Implement RetinaNet in :paper:`RetinaNet`.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        head: nn.Module,
        head_in_features,
        anchor_generator,
        box2box_transform,
        anchor_matcher,
        num_classes,
        focal_loss_alpha=0.25,
        focal_loss_gamma=2.0,
        smooth_l1_beta=0.0,
        box_reg_loss_type="smooth_l1",
        test_score_thresh=0.05,
        test_topk_candidates=1000,
        test_nms_thresh=0.5,
        max_detections_per_image=100,
        pixel_mean,
        pixel_std,
        vis_period=0,
        input_format="BGR",
    ):
        """
        NOTE: this interface is experimental.

        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            head (nn.Module): a module that predicts logits and regression deltas
                for each level from a list of per-level features
            head_in_features (Tuple[str]): Names of the input feature maps to be used in head
            anchor_generator (nn.Module): a module that creates anchors from a
                list of features. Usually an instance of :class:`AnchorGenerator`
            box2box_transform (Box2BoxTransform): defines the transform from anchor
                boxes to instance boxes
            anchor_matcher (Matcher): label the anchors by matching them with ground truth.
            num_classes (int): number of classes. Used to label background proposals.

            # Loss parameters:
            focal_loss_alpha (float): focal_loss_alpha
            focal_loss_gamma (float): focal_loss_gamma
            smooth_l1_beta (float): smooth_l1_beta
            box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou"

            # Inference parameters:
            test_score_thresh (float): Inference cls score threshold, only anchors with
                score > INFERENCE_TH are considered for inference (to improve speed)
            test_topk_candidates (int): Select topk candidates before NMS
            test_nms_thresh (float): Overlap threshold used for non-maximum suppression
                (suppress boxes with IoU >= this threshold)
            max_detections_per_image (int):
                Maximum number of detections to return per image during inference
                (100 is based on the limit established for the COCO dataset).
            pixel_mean, pixel_std: see :class:`DenseDetector`.
        """
        super().__init__(
            backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std
        )
        self.num_classes = num_classes

        # Anchors
        self.anchor_generator = anchor_generator
        self.box2box_transform = box2box_transform
        self.anchor_matcher = anchor_matcher

        # Loss parameters:
        self.focal_loss_alpha = focal_loss_alpha
        self.focal_loss_gamma = focal_loss_gamma
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type

        # Inference parameters:
        self.test_score_thresh = test_score_thresh
        self.test_topk_candidates = test_topk_candidates
        self.test_nms_thresh = test_nms_thresh
        self.max_detections_per_image = max_detections_per_image

        # Vis parameters
        self.vis_period = vis_period
        self.input_format = input_format

    @classmethod
    def from_config(cls, cfg):
        backbone = build_backbone(cfg)
        backbone_shape = backbone.output_shape()
        feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES]
        head = RetinaNetHead(cfg, feature_shapes)
        anchor_generator = build_anchor_generator(cfg, feature_shapes)
        return {
            "backbone": backbone,
            "head": head,
            "anchor_generator": anchor_generator,
            "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS),
            "anchor_matcher": Matcher(
                cfg.MODEL.RETINANET.IOU_THRESHOLDS,
                cfg.MODEL.RETINANET.IOU_LABELS,
                allow_low_quality_matches=True,
            ),
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES,
            "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES,
            # Loss parameters:
            "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA,
            "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA,
            "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA,
            "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE,
            # Inference parameters:
            "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST,
            "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST,
            "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST,
            "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
            # Vis parameters
            "vis_period": cfg.VIS_PERIOD,
            "input_format": cfg.INPUT.FORMAT,
        }

    def forward_training(self, images, features, predictions, gt_instances):
        # Transpose the Hi*Wi*A dimension to the middle:
        pred_logits, pred_anchor_deltas = self._transpose_dense_predictions(
            predictions, [self.num_classes, 4]
        )
        anchors = self.anchor_generator(features)
        gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances)
        return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes)

    def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes):
        """
        Args:
            anchors (list[Boxes]): a list of #feature level Boxes
            gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`.
                Their shapes are (N, R) and (N, R, 4), respectively, where R is
                the total number of anchors across levels, i.e. sum(Hi x Wi x Ai)
            pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the
                list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4),
                where K is the number of classes used in `pred_logits`.

        Returns:
            dict[str, Tensor]:
                mapping from a named loss to a scalar tensor storing the loss.
                Used during training only. The dict keys are: "loss_cls" and "loss_box_reg"
        """
        num_images = len(gt_labels)
        gt_labels = torch.stack(gt_labels)  # (N, R)

        valid_mask = gt_labels >= 0
        pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
        num_pos_anchors = pos_mask.sum().item()
get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
10
2023-12-10 20:14:00+00:00
24k
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n ie = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if network.get_parameters()[0].get_layout().empty:\n network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n batch_dim = get_batch(network)\n if batch_dim.is_static:\n batch_size = batch_dim.get_length()\n 
executable_network = ie.compile_model(network, device_name=\"CPU\") # device_name=\"MYRIAD\" for Intel NCS2\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 
'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, \"r\") as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith(\"tensorflow\")\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.executable_network([im]).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape == s, f\"input size 
{im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "SegmentationModel", "path": "models/yolo.py", "snippet": "class SegmentationModel(DetectionModel):\n # YOLOv5 segmentation model\n def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):\n super().__init__(cfg, ch, nc, anchors)" }, { "identifier": "Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [],}\n self.stop_training = False # set True to interrupt training\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook: The callback hook name to register the action to\n name: The name of the action for later reference\n callback: The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook: The name of the hook to check, defaults to all\n \"\"\"\n return self._callbacks[hook] if hook else self._callbacks\n\n def run(self, hook, *args, thread=False, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks on main thread\n\n Args:\n hook: The name of the hook to check, defaults to all\n args: Arguments to receive from YOLOv5\n thread: (boolean) Run callbacks in daemon thread\n kwargs: Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n for logger in self._callbacks[hook]:\n if thread:\n threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()\n else:\n logger['callback'](*args, **kwargs)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "NUM_THREADS", "path": 
"utils/general.py", "snippet": "NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads" }, { "identifier": "TQDM_BAR_FORMAT", "path": "utils/general.py", "snippet": "TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format" }, { "identifier": "Profile", "path": "utils/general.py", "snippet": "class Profile(contextlib.ContextDecorator):\n # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager\n def __init__(self, t=0.0):\n self.t = t\n self.cuda = torch.cuda.is_available()\n\n def __enter__(self):\n self.start = self.time()\n return self\n\n def __exit__(self, type, value, traceback):\n self.dt = self.time() - self.start # delta-time\n self.t += self.dt # accumulate dt\n\n def time(self):\n if self.cuda:\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(data, autodownload=True):\n # Download, check and/or unzip dataset if not found locally\n\n # Download (optional)\n extract_dir = ''\n if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):\n download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1)\n data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))\n extract_dir, autodownload = data.parent, False\n\n # Read yaml (optional)\n if isinstance(data, (str, Path)):\n data = yaml_load(data) # dictionary\n\n # Checks\n for k in 'train', 'val', 'names':\n assert k in data, emojis(f\"data.yaml '{k}:' field missing ❌\")\n if isinstance(data['names'], (list, tuple)): # old array format\n data['names'] = dict(enumerate(data['names'])) # convert to dict\n assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 
2: car'\n data['nc'] = len(data['names'])\n\n # Resolve paths\n path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'\n if not path.is_absolute():\n path = (ROOT / path).resolve()\n data['path'] = path # download scripts\n for k in 'train', 'val', 'test':\n if data.get(k): # prepend path\n if isinstance(data[k], str):\n x = (path / data[k]).resolve()\n if not x.exists() and data[k].startswith('../'):\n x = (path / data[k][3:]).resolve()\n data[k] = str(x)\n else:\n data[k] = [str((path / x).resolve()) for x in data[k]]\n\n # Parse yaml\n train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))\n if val:\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n LOGGER.info('\\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])\n if not s or not autodownload:\n raise Exception('Dataset not found ❌')\n t = time.time()\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n LOGGER.info(f'Downloading {s} to {f}...')\n torch.hub.download_url_to_file(s, f)\n Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root\n unzip_file(f, path=DATASETS_DIR) # unzip\n Path(f).unlink() # remove zip\n r = None # success\n elif s.startswith('bash '): # bash script\n LOGGER.info(f'Running {s} ...')\n r = os.system(s)\n else: # python script\n r = exec(s, {'yaml': data}) # return None\n dt = f'({round(time.time() - t, 1)}s)'\n s = f\"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}\" if r in (0, None) else f\"failure {dt} ❌\"\n LOGGER.info(f\"Dataset download {s}\")\n check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts\n return data # dictionary" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n imgsz = list(imgsz) # convert to list if tuple\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@TryExcept()\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):\n # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, Path): # requirements.txt file\n file = requirements.resolve()\n assert file.exists(), f\"{prefix} {file} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n elif isinstance(requirements, str):\n requirements = [requirements]\n\n s = ''\n n = 0\n for r in requirements:\n try:\n pkg.require(r)\n except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met\n s += f'\"{r}\" '\n n += 1\n\n if s and install and AUTOINSTALL: # check environment variable\n LOGGER.info(f\"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...\")\n try:\n # assert check_online(), \"AutoUpdate skipped (offline)\"\n LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())\n source = file if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n LOGGER.info(s)\n except Exception as e:\n LOGGER.warning(f'{prefix} ❌ {e}')" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "coco80_to_coco91_class", "path": "utils/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n return [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {\n 'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n\n # Method 1\n for n in range(2, 9999):\n p = f'{path}{sep}{n}{suffix}' # increment path\n if not os.path.exists(p): #\n break\n path = Path(p)\n\n # Method 2 (deprecated)\n # dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n # matches = [re.search(rf\"{path.stem}{sep}(\\d+)\", d) for d in dirs]\n # i = [int(m.groups()[0]) for m in matches if m] # indices\n # n = max(i) + 1 if i else 2 # increment number\n # path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\n \"\"\"Non-Maximum Suppression (NMS) on inference results to reject overlapping detections\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out)\n prediction = prediction[0] # select only inference output\n\n device = prediction.device\n mps = 'mps' in device.type # Apple MPS\n if mps: # MPS not fully supported yet, convert tensors to CPU before NMS\n prediction = prediction.cpu()\n bs = prediction.shape[0] # batch size\n nc = prediction.shape[2] - nm - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n # min_wh = 2 # (pixels) minimum box width and height\n max_wh = 7680 # (pixels) maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 0.5 + 0.05 * bs # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n mi = 5 + nc # mask start index\n output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | 
(x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n lb = labels[xi]\n v = torch.zeros((len(lb), nc + nm + 5), device=x.device)\n v[:, :4] = lb[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box/Mask\n box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2)\n mask = x[:, mi:] # zero columns if no masks\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)\n else: # best class only\n conf, j = x[:, 5:mi].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n else:\n x = x[x[:, 4].argsort(descending=True)] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n #i = my_soft_nms(boxes, scores, iou_thres) \n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if mps:\n output[xi] = output[xi].to(device)\n if (time.time() - t) > time_limit:\n LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))" }, { "identifier": "scale_boxes", "path": "utils/general.py", "snippet": "def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\n # Rescale boxes (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] 
- img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n boxes[:, [0, 2]] -= pad[0] # x padding\n boxes[:, [1, 3]] -= pad[1] # y padding\n boxes[:, :4] /= gain\n clip_boxes(boxes, img0_shape)\n return boxes" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n if detections is None:\n gt_classes = labels.int()\n for gc in gt_classes:\n self.matrix[self.nc, gc] += 1 # background FN\n return\n\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # true background\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # predicted background\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')\n def plot(self, normalize=True, save_dir='', names=()):\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns\n array[array < 0.005] = 
np.nan # don't annotate (would appear as 0.00)\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n nc, nn = self.nc, len(names) # number of classes, names\n sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size\n labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels\n ticklabels = (names + ['background']) if labels else \"auto\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array,\n ax=ax,\n annot=nc < 30,\n annot_kws={\n \"size\": 8},\n cmap='Blues',\n fmt='.2f',\n square=True,\n vmin=0.0,\n xticklabels=ticklabels,\n yticklabels=ticklabels).set_facecolor((1, 1, 1))\n ax.set_xlabel('True')\n ax.set_ylabel('Predicted')\n ax.set_title('Confusion Matrix')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close(fig)\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "box_iou", "path": "utils/metrics.py", "snippet": "def box_iou(box1, box2, eps=1e-7):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n # IoU = inter / (area1 + area2 - inter)\n return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output, max_det=300):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting\n targets = []\n for i, o in enumerate(output):\n box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n j = torch.full((conf.shape[0], 1), i)\n targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n return torch.cat(targets, 0).numpy()" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[5, 1:j],\n y[3, 1:j] * 1E2,\n '.-',\n linewidth=2,\n markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 
40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-',\n linewidth=2,\n markersize=8,\n alpha=.25,\n label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { "identifier": "create_dataloader", "path": "utils/segment/dataloaders.py", "snippet": "def create_dataloader(path,\n imgsz,\n batch_size,\n stride,\n single_cls=False,\n hyp=None,\n augment=False,\n cache=False,\n pad=0.0,\n rect=False,\n rank=-1,\n workers=8,\n image_weights=False,\n quad=False,\n prefix='',\n shuffle=False,\n mask_downsample_ratio=1,\n overlap_mask=False):\n if rect and shuffle:\n LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabelsAndMasks(\n path,\n imgsz,\n batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix,\n downsample_ratio=mask_downsample_ratio,\n overlap=overlap_mask)\n\n batch_size = min(batch_size, len(dataset))\n nd = torch.cuda.device_count() # number of CUDA devices\n nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n generator = torch.Generator()\n generator.manual_seed(6148914691236517205 + RANK)\n return loader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n worker_init_fn=seed_worker,\n generator=generator,\n ), dataset" }, { "identifier": "mask_iou", "path": "utils/segment/general.py", "snippet": "def mask_iou(mask1, mask2, eps=1e-7):\n \"\"\"\n mask1: [N, n] m1 means number of predicted objects\n mask2: [M, n] m2 means number of gt objects\n Note: n means image_w x image_h\n\n return: masks iou, [N, M]\n \"\"\"\n intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection\n return intersection / (union + eps)" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', 
align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_upsample", "path": "utils/segment/general.py", "snippet": "def process_mask_upsample(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "scale_image", "path": "utils/segment/general.py", "snippet": "def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n \"\"\"\n img1_shape: model input shape, [h, w]\n img0_shape: origin pic shape, [h, w, 3]\n masks: [h, w, num]\n \"\"\"\n # Rescale coordinates (xyxy) from im1_shape to im0_shape\n if ratio_pad is None: # calculate from im0_shape\n gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new\n pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding\n else:\n pad = ratio_pad[1]\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n if len(masks.shape) < 2:\n raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n masks = masks[top:bottom, left:right]\n # masks = masks.permute(2, 0, 1).contiguous()\n # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n # masks = masks.permute(1, 2, 0).contiguous()\n masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n if len(masks.shape) == 2:\n masks = masks[:, :, None]\n return masks" }, { "identifier": "Metrics", "path": "utils/segment/metrics.py", "snippet": "class Metrics:\n \"\"\"Metric for boxes and masks.\"\"\"\n\n def __init__(self) -> None:\n self.metric_box = Metric()\n self.metric_mask = Metric()\n\n def update(self, results):\n \"\"\"\n Args:\n results: Dict{'boxes': Dict{}, 'masks': Dict{}}\n \"\"\"\n self.metric_box.update(list(results[\"boxes\"].values()))\n self.metric_mask.update(list(results[\"masks\"].values()))\n\n def mean_results(self):\n return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n def class_result(self, i):\n return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n def get_maps(self, nc):\n return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n @property\n def ap_class_index(self):\n # boxes and masks have the same ap_class_index\n return self.metric_box.ap_class_index" }, { "identifier": "ap_per_class_box_and_mask", "path": "utils/segment/metrics.py", "snippet": "def ap_per_class_box_and_mask(\n tp_m,\n tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=False,\n save_dir=\".\",\n names=(),\n):\n \"\"\"\n Args:\n tp_b: tp of boxes.\n tp_m: tp of masks.\n other arguments see `func: ap_per_class`.\n \"\"\"\n results_boxes = ap_per_class(tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix=\"Box\")[2:]\n results_masks = ap_per_class(tp_m,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix=\"Mask\")[2:]\n\n results = {\n \"boxes\": {\n \"p\": results_boxes[0],\n \"r\": results_boxes[1],\n \"ap\": results_boxes[3],\n 
\"f1\": results_boxes[2],\n \"ap_class\": results_boxes[4]},\n \"masks\": {\n \"p\": results_masks[0],\n \"r\": results_masks[1],\n \"ap\": results_masks[3],\n \"f1\": results_masks[2],\n \"ap_class\": results_masks[4]}}\n return results" }, { "identifier": "plot_images_and_masks", "path": "utils/segment/plots.py", "snippet": "@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if isinstance(masks, torch.Tensor):\n masks = masks.cpu().numpy().astype(int)\n\n max_size = 1920 # max image size\n max_subplots = 16 # max image subplots, i.e. 4x4\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n idx = targets[:, 0] == i\n ti = targets[idx] # image targets\n\n boxes = xywh2xyxy(ti[:, 2:6]).T\n classes = ti[:, 1].astype('int')\n labels = ti.shape[1] == 6 # labels if no conf column\n conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale < 1: # absolute coords need scale if image scales\n boxes *= scale\n boxes[[0, 2]] += x\n boxes[[1, 3]] += y\n for j, box in enumerate(boxes.T.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n annotator.box_label(box, label, color=color)\n\n # Plot masks\n if len(masks):\n if masks.max() > 1.0: # mean that masks are overlap\n image_masks = masks[[i]] # (1, 640, 640)\n nl = len(ti)\n index = np.arange(nl).reshape(nl, 1, 1) + 1\n image_masks = np.repeat(image_masks, nl, axis=0)\n image_masks = np.where(image_masks == index, 1.0, 0.0)\n else:\n image_masks = masks[idx]\n\n im = np.asarray(annotator.im).copy()\n for j, box in enumerate(boxes.T.tolist()):\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n color = colors(classes[j])\n mh, mw = image_masks[j].shape\n if mh != h or mw != w:\n mask = image_masks[j].astype(np.uint8)\n mask = cv2.resize(mask, (w, h))\n mask = mask.astype(bool)\n else:\n mask = image_masks[j].astype(bool)\n 
with contextlib.suppress(Exception):\n im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6\n annotator.fromarray(im)\n annotator.im.save(fname) # save" }, { "identifier": "de_parallel", "path": "utils/torch_utils.py", "snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
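The `DetectMultiBackend` entry above unifies a dozen export formats behind one constructor and one `forward`. A minimal usage sketch, assuming a local PyTorch weights file (the path and dummy input are illustrative, not from this record):

import torch
from models.common import DetectMultiBackend

model = DetectMultiBackend('yolov5s-seg.pt', device=torch.device('cpu'))  # hypothetical weights path
model.warmup(imgsz=(1, 3, 640, 640))   # optional dry run (skipped on CPU for most backends)
im = torch.zeros(1, 3, 640, 640)       # dummy BCHW float input in [0, 1]
pred = model(im)                       # backend-agnostic forward; segmentation heads return (pred, proto)
print(model.stride, len(model.names))  # metadata resolved by the constructor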
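`Profile` accumulates (CUDA-synchronized) wall-clock time across uses. A sketch of the three-timer pattern that validation loops typically build from it; the staged bodies here are placeholders:

from utils.general import Profile

dt = (Profile(), Profile(), Profile())  # preprocess, inference, NMS accumulators
for _ in range(3):  # stand-in for a dataloader loop
    with dt[0]:
        pass  # e.g. im = im.float() / 255
    with dt[1]:
        pass  # e.g. preds = model(im)
    with dt[2]:
        pass  # e.g. preds = non_max_suppression(preds, ...)
print(tuple(f'{x.t:.4f}s' for x in dt))  # total seconds per stage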
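`check_img_size` rounds an image size up to a multiple of the model stride. A quick worked example (the values follow directly from the ceiling-division in `make_divisible`):

from utils.general import check_img_size

print(check_img_size(641, s=32))         # 672 -- rounded up to the next multiple of 32, with a warning
print(check_img_size([640, 481], s=32))  # [640, 512] -- each dimension is checked independently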
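`xywh2xyxy` and `xyxy2xywh` are exact inverses; a one-box round trip makes the center/corner conventions concrete:

import numpy as np
from utils.general import xywh2xyxy, xyxy2xywh

xywh = np.array([[50., 50., 20., 10.]])    # center (50, 50), 20 wide, 10 tall
xyxy = xywh2xyxy(xywh)                     # [[40., 45., 60., 55.]] -- top-left / bottom-right corners
assert np.allclose(xyxy2xywh(xyxy), xywh)  # the round trip recovers the original box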
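`box_iou` returns the full N x M pairwise IoU matrix. A two-box check against its formula, inter / (area1 + area2 - inter):

import torch
from utils.metrics import box_iou

a = torch.tensor([[0., 0., 10., 10.]])                        # one 10x10 box
b = torch.tensor([[5., 5., 15., 15.], [20., 20., 30., 30.]])  # one overlapping, one disjoint
print(box_iou(a, b))  # tensor([[0.1429, 0.0000]]) -- 25 / (100 + 100 - 25), then no overlap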
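`non_max_suppression` takes the raw dense head output and returns one filtered tensor per image. A shape-level sketch with random scores standing in for real predictions (`nm=32` mirrors the YOLOv5 segmentation head; actual detections come from a model, not `torch.rand`):

import torch
from utils.general import non_max_suppression

nc, nm = 80, 32                                 # classes, mask coefficients
prediction = torch.rand(1, 25200, 5 + nc + nm)  # (batch, boxes, xywh + obj + cls scores + mask coeffs)
out = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, nm=nm)
print(out[0].shape)  # (n, 6 + nm): [x1, y1, x2, y2, conf, cls, *mask_coeffs] per kept box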
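`mask_iou` expects masks flattened to (num, H*W). A tiny 4x4 example where the two masks share half their pixels:

import torch
from utils.segment.general import mask_iou

h = w = 4
m1 = torch.zeros(1, h * w); m1[0, :8] = 1.    # mask covering the top two rows
m2 = torch.zeros(1, h * w); m2[0, 4:12] = 1.  # same size, shifted down one row
print(mask_iou(m1, m2))  # tensor([[0.3333]]) -- 4 shared pixels / 12 pixels in the union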
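`process_mask` turns per-detection mask coefficients plus prototype masks into binary instance masks. A shape walkthrough with placeholder values (the 32-dim / 160x160 proto shape mirrors the docstring's layout, not this record's data):

import torch
from utils.segment.general import process_mask

protos = torch.rand(32, 160, 160)                  # [mask_dim, mask_h, mask_w] from the proto head
masks_in = torch.rand(3, 32)                       # coefficients for 3 detections surviving NMS
bboxes = torch.tensor([[0., 0., 320., 320.]] * 3)  # xyxy in 640x640 network-input coordinates
masks = process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True)
print(masks.shape)  # torch.Size([3, 640, 640]) -- thresholded at 0.5 after crop + upsample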
import_statement:
import argparse
import json
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm

from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
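Of these imports, mask_iou is called by process_batch below on masks flattened to (N, H*W) rows, but its body is not reproduced in this row. A minimal sketch matching that call shape; the repo's implementation may differ in details:

import torch

def mask_iou(mask1, mask2, eps=1e-7):
    # mask1: (N, H*W), mask2: (M, H*W), binary {0, 1} float tensors.
    intersection = torch.matmul(mask1, mask2.T)                       # (N, M) overlap counts
    union = mask1.sum(1)[:, None] + mask2.sum(1)[None] - intersection
    return intersection / (union + eps)                               # pairwise IoU matrix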
token_num: 19,567
rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1
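save_one_json in the code above serializes each predicted mask with pycocotools run-length encoding; encode expects a Fortran-ordered uint8 array and returns counts as bytes, which is why single_encode decodes them to utf-8 before JSON dumping. A standalone illustration of that step:

import numpy as np
from pycocotools.mask import encode

mask = np.zeros((480, 640), dtype=np.uint8)
mask[100:200, 150:300] = 1                                 # one rectangular instance
rle = encode(np.asarray(mask[:, :, None], order="F"))[0]   # (H, W, 1), Fortran order
rle["counts"] = rle["counts"].decode("utf-8")              # bytes -> str for json.dumps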
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1
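The de-duplication inside process_batch is easy to misread: after sorting candidate (label, detection, IoU) triples by IoU, two np.unique(..., return_index=True) passes keep at most one row per detection index and then one per label index. A small worked example with made-up values; note that np.unique also re-orders the surviving rows, which is what the commented-out re-sort between the two passes is about:

import numpy as np

matches = np.array([[0, 2, 0.9],   # label 0 vs detection 2
                    [0, 1, 0.8],   # label 0 vs detection 1
                    [1, 2, 0.7]])  # label 1 vs detection 2
matches = matches[matches[:, 2].argsort()[::-1]]                   # best IoU first
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one row per detection
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one row per label
print(matches)  # [[0.  1.  0.8]]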
next_line: LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')
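The gold continuation completes the `if not (pt or jit)` branch at which cropped_code is truncated, logging why batch size is forced to 1 for exported, non-PyTorch backends.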
gold_snippet_index: 3
created_at: 2023-12-10 14:18:29+00:00
level: 24k
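Each row of this dump pairs a cropped prefix (cropped_code) with a single gold continuation (next_line), plus the context snippets the completion may depend on. The dataset itself does not prescribe a scoring rule; a hypothetical whitespace-normalized exact-match helper, purely for illustration:

def next_line_match(prediction: str, gold: str) -> bool:
    # Hypothetical metric: collapse whitespace runs before comparing; the
    # dataset's actual evaluation protocol is not specified in this dump.
    return " ".join(prediction.split()) == " ".join(gold.split())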
repo_name: youngskkim/CRN
file_path: exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n info_paths,\n is_train,\n load_interval=1,\n num_sweeps=1,\n img_conf=dict(img_mean=[123.675, 116.28, 103.53],\n img_std=[58.395, 57.12, 57.375],\n to_rgb=True),\n img_backbone_conf=dict(\n x_bound=[-51.2, 51.2, 0.8],\n y_bound=[-51.2, 51.2, 0.8],\n z_bound=[-5, 3, 8],\n d_bound=[2.0, 58.0, 0.5]\n ),\n drop_aug_conf=None,\n return_image=True,\n return_depth=False,\n return_radar_pv=False,\n depth_path='depth_gt',\n radar_pv_path='radar_pv_filter',\n remove_z_axis=False,\n use_cbgs=False,\n gt_for_radar_only=False,\n sweep_idxes=list(),\n key_idxes=list()):\n \"\"\"Dataset used for bevdetection task.\n Args:\n ida_aug_conf (dict): Config for ida augmentation.\n bda_aug_conf (dict): Config for bda augmentation.\n classes (list): Class names.\n use_cbgs (bool): Whether to use cbgs strategy,\n Default: False.\n num_sweeps (int): Number of sweeps to be used for each sample.\n default: 1.\n img_conf (dict): Config for image.\n return_depth (bool): Whether to use depth gt.\n default: False.\n sweep_idxes (list): List of sweep idxes to be used.\n default: list().\n key_idxes (list): List of key idxes to be used.\n default: list().\n \"\"\"\n super().__init__()\n if isinstance(info_paths, list):\n self.infos = list()\n for info_path in info_paths:\n self.infos.extend(mmcv.load(info_path))\n else:\n self.infos = mmcv.load(info_paths)\n self.is_train = is_train\n self.ida_aug_conf = ida_aug_conf\n self.bda_aug_conf = bda_aug_conf\n self.rda_aug_conf = rda_aug_conf\n self.drop_aug_conf = drop_aug_conf\n self.data_root = data_root\n self.classes = classes\n self.use_cbgs = use_cbgs\n if self.use_cbgs:\n self.cat2id = {name: i for i, name in enumerate(self.classes)}\n self.sample_indices = self._get_sample_indices()\n self.num_sweeps = num_sweeps\n self.img_mean = np.array(img_conf['img_mean'], np.float32)\n self.img_std = np.array(img_conf['img_std'], np.float32)\n self.to_rgb = img_conf['to_rgb']\n self.img_backbone_conf = img_backbone_conf\n\n self.return_image = return_image\n self.return_depth = return_depth\n self.return_radar_pv = return_radar_pv\n\n self.remove_z_axis = remove_z_axis\n self.gt_for_radar_only = gt_for_radar_only\n\n assert sum([sweep_idx >= 0 for sweep_idx in sweep_idxes]) \\\n == len(sweep_idxes), 'All `sweep_idxes` must greater \\\n than or equal to 0.'\n\n self.sweeps_idx = sweep_idxes\n assert sum([key_idx < 0 for key_idx in key_idxes]) == len(key_idxes),\\\n 'All `key_idxes` must less than 0.'\n self.key_idxes = [0] + key_idxes\n if load_interval > 1:\n self.infos = self.infos[::load_interval]\n self.depth_path = depth_path\n self.radar_pv_path = radar_pv_path\n\n self.max_radar_points_pv = 1536\n self.max_distance_pv = self.img_backbone_conf['d_bound'][1]\n\n def _get_sample_indices(self):\n \"\"\"Load annotations from ann_file.\n\n Args:\n ann_file (str): Path of the annotation file.\n\n Returns:\n list[dict]: List of annotations after class sampling.\n \"\"\"\n class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()}\n for idx, info in enumerate(self.infos):\n gt_names = set(\n [ann_info['category_name'] for ann_info in info['ann_infos']])\n for gt_name in gt_names:\n gt_name = map_name_from_general_to_detection[gt_name]\n if gt_name not in self.classes:\n continue\n class_sample_idxs[self.cat2id[gt_name]].append(idx)\n duplicated_samples = 
sum(\n [len(v) for _, v in class_sample_idxs.items()])\n class_distribution = {\n k: len(v) / duplicated_samples\n for k, v in class_sample_idxs.items()\n }\n\n sample_indices = []\n\n frac = 1.0 / len(self.classes)\n ratios = [frac / v for v in class_distribution.values()]\n for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios):\n sample_indices += np.random.choice(cls_inds,\n int(len(cls_inds) *\n ratio)).tolist()\n return sample_indices\n\n def sample_ida_augmentation(self):\n \"\"\"Generate ida augmentation values based on ida_config.\"\"\"\n H, W = self.ida_aug_conf['H'], self.ida_aug_conf['W']\n fH, fW = self.ida_aug_conf['final_dim']\n if self.is_train:\n resize = np.random.uniform(*self.ida_aug_conf['resize_lim'])\n resize_dims = (int(W * resize), int(H * resize))\n newW, newH = resize_dims\n crop_h = int(\n (1 - np.random.uniform(*self.ida_aug_conf['bot_pct_lim'])) *\n newH) - fH\n crop_w = int(np.random.uniform(0, max(0, newW - fW)))\n crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n flip = False\n if self.ida_aug_conf['rand_flip'] and np.random.choice([0, 1]):\n flip = True\n rotate_ida = np.random.uniform(*self.ida_aug_conf['rot_lim'])\n else:\n resize = max(fH / H, fW / W)\n resize_dims = (int(W * resize), int(H * resize))\n newW, newH = resize_dims\n crop_h = int(\n (1 - np.mean(self.ida_aug_conf['bot_pct_lim'])) * newH) - fH\n crop_w = int(max(0, newW - fW) / 2)\n crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n flip = False\n rotate_ida = 0\n return resize, resize_dims, crop, flip, rotate_ida\n\n def sample_bda_augmentation(self):\n \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n if self.is_train:\n if np.random.uniform() < self.bda_aug_conf['rot_ratio']:\n rotate_bda = np.random.uniform(*self.bda_aug_conf['rot_lim'])\n else:\n rotate_bda = 0\n scale_bda = np.random.uniform(*self.bda_aug_conf['scale_lim'])\n flip_dx = np.random.uniform() < self.bda_aug_conf['flip_dx_ratio']\n flip_dy = np.random.uniform() < self.bda_aug_conf['flip_dy_ratio']\n else:\n rotate_bda = 0\n scale_bda = 1.0\n flip_dx = False\n flip_dy = False\n return rotate_bda, scale_bda, flip_dx, flip_dy\n\n def sample_radar_augmentation(self):\n \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n if self.is_train:\n radar_idx = np.random.choice(self.rda_aug_conf['N_sweeps'],\n self.rda_aug_conf['N_use'],\n replace=False)\n else:\n radar_idx = np.arange(self.rda_aug_conf['N_sweeps'])\n return radar_idx\n\n def transform_radar_pv(self, points, resize, resize_dims, crop, flip, rotate, radar_idx):\n points = points[points[:, 2] < self.max_distance_pv, :]\n\n H, W = resize_dims\n points[:, :2] = points[:, :2] * resize\n points[:, 0] -= crop[0]\n points[:, 1] -= crop[1]\n if flip:\n points[:, 0] = resize_dims[1] - points[:, 0]\n\n points[:, 0] -= W / 2.0\n points[:, 1] -= H / 2.0\n\n h = rotate / 180 * np.pi\n rot_matrix = [\n [np.cos(h), np.sin(h)],\n [-np.sin(h), np.cos(h)],\n ]\n points[:, :2] = np.matmul(rot_matrix, points[:, :2].T).T\n\n points[:, 0] += W / 2.0\n points[:, 1] += H / 2.0\n\n depth_coords = points[:, :2].astype(np.int16)\n\n valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n & (depth_coords[:, 0] < resize_dims[1])\n & (depth_coords[:, 1] >= 0)\n & (depth_coords[:, 0] >= 0))\n\n points = torch.Tensor(points[valid_mask])\n\n if self.remove_z_axis:\n points[:, 1] = 1. 
# dummy height value\n\n points_save = []\n for i in radar_idx:\n points_save.append(points[points[:, 6] == i])\n points = torch.cat(points_save, dim=0)\n\n # mean, std of rcs and speed are from train set\n points[:, 3] = (points[:, 3] - 4.783) / 7.576\n points[:, 4] = (torch.norm(points[:, 4:6], dim=1) - 0.677) / 1.976\n\n if self.is_train:\n drop_idx = np.random.uniform(size=points.shape[0]) # randomly drop points\n points = points[drop_idx > self.rda_aug_conf['drop_ratio']]\n\n num_points, num_feat = points.shape\n if num_points > self.max_radar_points_pv:\n choices = np.random.choice(num_points, self.max_radar_points_pv, replace=False)\n points = points[choices]\n else:\n num_append = self.max_radar_points_pv - num_points\n points = torch.cat([points, -999*torch.ones(num_append, num_feat)], dim=0)\n\n if num_points == 0:\n points[0, :] = points.new_tensor([0.1, 0.1, self.max_distance_pv-1, 0, 0, 0, 0])\n\n points[..., [0, 1, 2]] = points[..., [0, 2, 1]] # convert [w, h, d] to [w, d, h]\n\n return points[..., :5]\n\n def depth_transform(self, cam_depth, resize, resize_dims, crop, flip, rotate):\n \"\"\"Transform depth based on ida augmentation configuration.\n\n Args:\n cam_depth (np array): Nx3, 3: x,y,d.\n resize (float): Resize factor.\n resize_dims (tuple): Final dimension.\n crop (tuple): x1, y1, x2, y2\n flip (bool): Whether to flip.\n rotate (float): Rotation value.\n\n Returns:\n np array: [h/down_ratio, w/down_ratio, d]\n \"\"\"\n valid_depth = cam_depth[:, 2] < self.img_backbone_conf['d_bound'][1]\n cam_depth = cam_depth[valid_depth, :]\n\n H, W = resize_dims\n cam_depth[:, :2] = cam_depth[:, :2] * resize\n cam_depth[:, 0] -= crop[0]\n cam_depth[:, 1] -= crop[1]\n if flip:\n cam_depth[:, 0] = resize_dims[1] - cam_depth[:, 0]\n\n cam_depth[:, 0] -= W / 2.0\n cam_depth[:, 1] -= H / 2.0\n\n h = rotate / 180 * np.pi\n rot_matrix = [\n [np.cos(h), np.sin(h)],\n [-np.sin(h), np.cos(h)],\n ]\n cam_depth[:, :2] = np.matmul(rot_matrix, cam_depth[:, :2].T).T\n\n cam_depth[:, 0] += W / 2.0\n cam_depth[:, 1] += H / 2.0\n\n depth_coords = cam_depth[:, :2].astype(np.int16)\n\n depth_map = np.zeros(resize_dims)\n valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n & (depth_coords[:, 0] < resize_dims[1])\n & (depth_coords[:, 1] >= 0)\n & (depth_coords[:, 0] >= 0))\n depth_map[depth_coords[valid_mask, 1],\n depth_coords[valid_mask, 0]] = cam_depth[valid_mask, 2]\n\n return torch.Tensor(depth_map)\n\n def get_image(self, cam_infos, cams):\n \"\"\"Given data and cam_names, return image data needed.\n\n Args:\n sweeps_data (list): Raw data used to generate the data we needed.\n cams (list): Camera names.\n\n Returns:\n Tensor: Image data after processing.\n Tensor: Transformation matrix from camera to ego.\n Tensor: Intrinsic matrix.\n Tensor: Transformation matrix for ida.\n Tensor: Transformation matrix from key\n frame camera to sweep frame camera.\n Tensor: timestamps.\n dict: meta infos needed for evaluation.\n \"\"\"\n assert len(cam_infos) > 0\n sweep_imgs = list()\n sweep_sensor2ego_mats = list()\n sweep_intrin_mats = list()\n sweep_ida_mats = list()\n sweep_sensor2sensor_mats = list()\n sweep_timestamps = list()\n sweep_gt_depths = list()\n sweep_radar_points = list()\n for cam in cams:\n imgs = list()\n sensor2ego_mats = list()\n intrin_mats = list()\n ida_mats = list()\n sensor2sensor_mats = list()\n timestamps = list()\n gt_depths = list()\n radar_points = list()\n key_info = cam_infos[0]\n resize, resize_dims, crop, flip, \\\n rotate_ida = self.sample_ida_augmentation()\n radar_idx 
= self.sample_radar_augmentation()\n\n for sweep_idx, cam_info in enumerate(cam_infos):\n img = Image.open(\n os.path.join(self.data_root, cam_info[cam]['filename']))\n\n w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n # sweep sensor to sweep ego\n sweepsensor2sweepego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepsensor2sweepego_tran = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['translation'])\n sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n (4, 4))\n sweepsensor2sweepego[3, 3] = 1\n sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n # sweep ego to global\n w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n sweepego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepego2global_tran = torch.Tensor(\n cam_info[cam]['ego_pose']['translation'])\n sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n sweepego2global[3, 3] = 1\n sweepego2global[:3, :3] = sweepego2global_rot\n sweepego2global[:3, -1] = sweepego2global_tran\n\n # global sensor to cur ego\n w, x, y, z = key_info[cam]['ego_pose']['rotation']\n keyego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keyego2global_tran = torch.Tensor(\n key_info[cam]['ego_pose']['translation'])\n keyego2global = keyego2global_rot.new_zeros((4, 4))\n keyego2global[3, 3] = 1\n keyego2global[:3, :3] = keyego2global_rot\n keyego2global[:3, -1] = keyego2global_tran\n global2keyego = keyego2global.inverse()\n\n # cur ego to sensor\n w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n keysensor2keyego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keysensor2keyego_tran = torch.Tensor(\n key_info[cam]['calibrated_sensor']['translation'])\n keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n keysensor2keyego[3, 3] = 1\n keysensor2keyego[:3, :3] = keysensor2keyego_rot\n keysensor2keyego[:3, -1] = keysensor2keyego_tran\n keyego2keysensor = keysensor2keyego.inverse()\n keysensor2sweepsensor = (\n keyego2keysensor @ global2keyego @ sweepego2global\n @ sweepsensor2sweepego).inverse()\n sweepsensor2keyego = global2keyego @ sweepego2global @\\\n sweepsensor2sweepego\n sensor2ego_mats.append(sweepsensor2keyego)\n sensor2sensor_mats.append(keysensor2sweepsensor)\n intrin_mat = torch.zeros((4, 4))\n intrin_mat[3, 3] = 1\n intrin_mat[:3, :3] = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['camera_intrinsic'])\n\n file_name = os.path.split(cam_info[cam]['filename'])[-1]\n if self.return_depth:\n point_depth = np.fromfile(os.path.join(\n self.data_root, self.depth_path, f'{file_name}.bin'),\n dtype=np.float32,\n count=-1)\n point_depth = point_depth.reshape(-1, 3)\n point_depth_augmented = self.depth_transform(\n point_depth, resize, self.ida_aug_conf['final_dim'],\n crop, flip, rotate_ida)\n gt_depths.append(point_depth_augmented)\n\n if self.return_radar_pv:\n radar_point = np.fromfile(os.path.join(\n self.data_root, self.radar_pv_path, f'{file_name}.bin'),\n dtype=np.float32,\n count=-1).reshape(-1, 7)\n radar_point_augmented = self.transform_radar_pv(\n radar_point, resize, self.ida_aug_conf['final_dim'],\n crop, flip, rotate_ida, radar_idx)\n radar_points.append(radar_point_augmented)\n\n img, ida_mat = img_transform(\n img,\n resize=resize,\n resize_dims=resize_dims,\n crop=crop,\n flip=flip,\n rotate=rotate_ida,\n )\n ida_mats.append(ida_mat)\n img = mmcv.imnormalize(np.array(img), self.img_mean,\n self.img_std, self.to_rgb)\n img = 
torch.from_numpy(img).permute(2, 0, 1)\n imgs.append(img)\n intrin_mats.append(intrin_mat)\n timestamps.append(cam_info[cam]['timestamp'])\n sweep_imgs.append(torch.stack(imgs))\n sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n sweep_intrin_mats.append(torch.stack(intrin_mats))\n sweep_ida_mats.append(torch.stack(ida_mats))\n sweep_sensor2sensor_mats.append(torch.stack(sensor2sensor_mats))\n sweep_timestamps.append(torch.tensor(timestamps))\n if self.return_depth:\n sweep_gt_depths.append(torch.stack(gt_depths))\n if self.return_radar_pv:\n sweep_radar_points.append(torch.stack(radar_points))\n\n ret_list = [\n torch.stack(sweep_imgs).permute(1, 0, 2, 3, 4),\n torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_intrin_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_ida_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_sensor2sensor_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_timestamps).permute(1, 0),\n ]\n if self.return_depth:\n ret_list.append(torch.stack(sweep_gt_depths).permute(1, 0, 2, 3),)\n else:\n ret_list.append(None)\n if self.return_radar_pv:\n ret_list.append(torch.stack(sweep_radar_points).permute(1, 0, 2, 3),)\n else:\n ret_list.append(None)\n return ret_list\n\n def get_image_meta(self, cam_infos, cams):\n key_info = cam_infos[0]\n\n # Get mean pose of all cams.\n ego2global_rotation = np.mean(\n [key_info[cam]['ego_pose']['rotation'] for cam in cams], 0)\n ego2global_translation = np.mean(\n [key_info[cam]['ego_pose']['translation'] for cam in cams], 0)\n img_metas = dict(\n box_type_3d=LiDARInstance3DBoxes,\n ego2global_translation=ego2global_translation,\n ego2global_rotation=ego2global_rotation,\n )\n return img_metas\n\n def get_image_sensor2ego_mats(self, cam_infos, cams):\n sweep_sensor2ego_mats = list()\n for cam in cams:\n sensor2ego_mats = list()\n key_info = cam_infos[0]\n for sweep_idx, cam_info in enumerate(cam_infos):\n w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n # sweep sensor to sweep ego\n sweepsensor2sweepego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepsensor2sweepego_tran = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['translation'])\n sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n (4, 4))\n sweepsensor2sweepego[3, 3] = 1\n sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n # sweep ego to global\n w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n sweepego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepego2global_tran = torch.Tensor(\n cam_info[cam]['ego_pose']['translation'])\n sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n sweepego2global[3, 3] = 1\n sweepego2global[:3, :3] = sweepego2global_rot\n sweepego2global[:3, -1] = sweepego2global_tran\n\n # global sensor to cur ego\n w, x, y, z = key_info[cam]['ego_pose']['rotation']\n keyego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keyego2global_tran = torch.Tensor(\n key_info[cam]['ego_pose']['translation'])\n keyego2global = keyego2global_rot.new_zeros((4, 4))\n keyego2global[3, 3] = 1\n keyego2global[:3, :3] = keyego2global_rot\n keyego2global[:3, -1] = keyego2global_tran\n global2keyego = keyego2global.inverse()\n\n # cur ego to sensor\n w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n keysensor2keyego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keysensor2keyego_tran = torch.Tensor(\n key_info[cam]['calibrated_sensor']['translation'])\n 
keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n keysensor2keyego[3, 3] = 1\n keysensor2keyego[:3, :3] = keysensor2keyego_rot\n keysensor2keyego[:3, -1] = keysensor2keyego_tran\n sweepsensor2keyego = global2keyego @ sweepego2global @\\\n sweepsensor2sweepego\n sensor2ego_mats.append(sweepsensor2keyego)\n sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n return torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3)\n\n def get_gt(self, info, cams, return_corners=False):\n \"\"\"Generate gt labels from info.\n\n Args:\n info(dict): Infos needed to generate gt labels.\n cams(list): Camera names.\n\n Returns:\n Tensor: GT bboxes.\n Tensor: GT labels.\n \"\"\"\n ego2global_rotation = np.mean(\n [info['cam_infos'][cam]['ego_pose']['rotation'] for cam in cams],\n 0)\n ego2global_translation = np.mean([\n info['cam_infos'][cam]['ego_pose']['translation'] for cam in cams\n ], 0)\n trans = -np.array(ego2global_translation)\n rot = Quaternion(ego2global_rotation).inverse\n gt_boxes = list()\n gt_labels = list()\n if return_corners: # for debugging and visualization\n gt_corners = list()\n else:\n gt_corners = None\n for ann_info in info['ann_infos']:\n # Use ego coordinate.\n if self.gt_for_radar_only:\n if ann_info['num_radar_pts'] == 0:\n continue\n if map_name_from_general_to_detection[ann_info['category_name']] not in self.classes:\n continue\n if ann_info['num_lidar_pts'] + ann_info['num_radar_pts'] == 0:\n continue\n\n box = Box(\n ann_info['translation'],\n ann_info['size'],\n Quaternion(ann_info['rotation']),\n velocity=ann_info['velocity'],\n )\n box.translate(trans)\n box.rotate(rot)\n box_xyz = np.array(box.center)\n box_dxdydz = np.array(box.wlh)[[1, 0, 2]]\n box_yaw = np.array([box.orientation.yaw_pitch_roll[0]])\n box_velo = np.array(box.velocity[:2])\n gt_box = np.concatenate([box_xyz, box_dxdydz, box_yaw, box_velo])\n gt_boxes.append(gt_box)\n gt_labels.append(\n self.classes.index(map_name_from_general_to_detection[\n ann_info['category_name']]))\n if return_corners: # for debugging and visualization\n gt_corners.append(box.corners())\n\n return torch.Tensor(gt_boxes), torch.tensor(gt_labels), gt_corners\n\n def choose_cams(self):\n \"\"\"Choose cameras randomly.\n\n Returns:\n list: Cameras to be used.\n \"\"\"\n if self.is_train and self.ida_aug_conf['Ncams'] < len(\n self.ida_aug_conf['cams']):\n cams = np.random.choice(self.ida_aug_conf['cams'],\n self.ida_aug_conf['Ncams'],\n replace=False)\n else:\n cams = self.ida_aug_conf['cams']\n return cams\n\n def __getitem__(self, idx):\n if self.use_cbgs:\n idx = self.sample_indices[idx]\n cam_infos = list()\n pts_infos = list()\n cams = self.choose_cams()\n for key_idx in self.key_idxes:\n cur_idx = key_idx + idx\n # Handle scenarios when current idx doesn't have previous key\n # frame or previous key frame is from another scene.\n while self.infos[cur_idx]['scene_token'] != self.infos[idx]['scene_token']:\n cur_idx += 1\n info = self.infos[cur_idx]\n cam_infos.append(info['cam_infos'])\n pts_infos.append([info['lidar_infos']] + info['lidar_sweeps'])\n for sweep_idx in self.sweeps_idx:\n if len(info['cam_sweeps']) == 0:\n cam_infos.append(info['cam_infos'])\n else:\n # Handle scenarios when current sweep doesn't have all cam keys.\n for i in range(min(len(info['cam_sweeps']) - 1, sweep_idx), -1,\n -1):\n if sum([cam in info['cam_sweeps'][i]\n for cam in cams]) == len(cams):\n cam_infos.append(info['cam_sweeps'][i])\n break\n\n if self.return_image or self.return_depth or self.return_radar_pv:\n image_data_list = 
self.get_image(cam_infos, cams)\n (\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n sweep_timestamps,\n ) = image_data_list[:6]\n else:\n (\n sweep_imgs,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n sweep_timestamps,\n ) = None, None, None, None, None\n sweep_sensor2ego_mats = self.get_image_sensor2ego_mats(cam_infos, cams)\n\n img_metas = self.get_image_meta(cam_infos, cams)\n img_metas['token'] = self.infos[idx]['sample_token']\n gt_boxes_3d, gt_labels_3d, gt_corners = self.get_gt(self.infos[idx], cams, return_corners=False)\n\n rotate_bda, scale_bda, flip_dx, flip_dy = self.sample_bda_augmentation()\n gt_boxes_3d, bda_rot = bev_det_transform(gt_boxes_3d, rotate_bda, scale_bda, flip_dx, flip_dy)\n\n bda_mat = torch.zeros(4, 4, dtype=torch.float32)\n bda_mat[:3, :3] = bda_rot\n bda_mat[3, 3] = 1\n\n ret_list = [\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n bda_mat,\n sweep_timestamps,\n img_metas,\n gt_boxes_3d,\n gt_labels_3d,\n ]\n\n if self.return_depth:\n ret_list.append(image_data_list[6])\n else:\n ret_list.append(None)\n if self.return_radar_pv:\n ret_list.append(image_data_list[7])\n else:\n ret_list.append(None)\n\n return ret_list\n\n def __str__(self):\n return f\"\"\"NuscData: {len(self)} samples. Split: \\\n {\"train\" if self.is_train else \"val\"}.\n Augmentation Conf: {self.ida_aug_conf}\"\"\"\n\n def __len__(self):\n if self.use_cbgs:\n return len(self.sample_indices)\n else:\n return len(self.infos)" }, { "identifier": "collate_fn", "path": "datasets/nusc_det_dataset.py", "snippet": "def collate_fn(data,\n is_return_image=True,\n is_return_depth=False,\n is_return_radar_pv=False):\n assert (is_return_image or is_return_depth or is_return_radar_pv) is True\n imgs_batch = list()\n sensor2ego_mats_batch = list()\n intrin_mats_batch = list()\n ida_mats_batch = list()\n sensor2sensor_mats_batch = list()\n bda_mat_batch = list()\n gt_boxes_3d_batch = list()\n gt_labels_3d_batch = list()\n img_metas_batch = list()\n depth_labels_batch = list()\n radar_pv_batch = list()\n\n for iter_data in data:\n (\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n bda_mat,\n sweep_timestamps,\n img_metas,\n gt_boxes,\n gt_labels,\n ) = iter_data[:10]\n if is_return_depth:\n gt_depth = iter_data[10]\n depth_labels_batch.append(gt_depth)\n if is_return_radar_pv:\n radar_pv = iter_data[11]\n radar_pv_batch.append(radar_pv)\n\n imgs_batch.append(sweep_imgs)\n sensor2ego_mats_batch.append(sweep_sensor2ego_mats)\n intrin_mats_batch.append(sweep_intrins)\n ida_mats_batch.append(sweep_ida_mats)\n sensor2sensor_mats_batch.append(sweep_sensor2sensor_mats)\n bda_mat_batch.append(bda_mat)\n img_metas_batch.append(img_metas)\n gt_boxes_3d_batch.append(gt_boxes)\n gt_labels_3d_batch.append(gt_labels)\n\n if is_return_image:\n mats_dict = dict()\n mats_dict['sensor2ego_mats'] = torch.stack(sensor2ego_mats_batch)\n mats_dict['intrin_mats'] = torch.stack(intrin_mats_batch)\n mats_dict['ida_mats'] = torch.stack(ida_mats_batch)\n mats_dict['sensor2sensor_mats'] = torch.stack(sensor2sensor_mats_batch)\n mats_dict['bda_mat'] = torch.stack(bda_mat_batch)\n ret_list = [\n torch.stack(imgs_batch),\n mats_dict,\n img_metas_batch,\n gt_boxes_3d_batch,\n gt_labels_3d_batch,\n None, # reserve for segmentation\n ]\n else:\n ret_list = [\n None,\n None,\n img_metas_batch,\n gt_boxes_3d_batch,\n gt_labels_3d_batch,\n None,\n ]\n if 
is_return_depth:\n ret_list.append(torch.stack(depth_labels_batch))\n else:\n ret_list.append(None)\n if is_return_radar_pv:\n ret_list.append(torch.stack(radar_pv_batch))\n else:\n ret_list.append(None)\n\n return ret_list" }, { "identifier": "DetNuscEvaluator", "path": "evaluators/det_evaluators.py", "snippet": "class DetNuscEvaluator():\n ErrNameMapping = {\n 'trans_err': 'mATE',\n 'scale_err': 'mASE',\n 'orient_err': 'mAOE',\n 'vel_err': 'mAVE',\n 'attr_err': 'mAAE',\n }\n\n DefaultAttribute = {\n 'car': 'vehicle.parked',\n 'pedestrian': 'pedestrian.moving',\n 'trailer': 'vehicle.parked',\n 'truck': 'vehicle.parked',\n 'bus': 'vehicle.moving',\n 'motorcycle': 'cycle.without_rider',\n 'construction_vehicle': 'vehicle.parked',\n 'bicycle': 'cycle.without_rider',\n 'barrier': '',\n 'traffic_cone': '',\n }\n\n def __init__(\n self,\n class_names,\n eval_version='detection_cvpr_2019',\n data_root='./data/nuScenes',\n version='v1.0-trainval',\n modality=dict(use_lidar=False,\n use_camera=True,\n use_radar=True,\n use_map=False,\n use_external=False),\n output_dir=None,\n ) -> None:\n self.eval_version = eval_version\n self.data_root = data_root\n\n # Load config file and deserialize it.\n this_dir = osp.dirname(osp.abspath(__file__))\n with open(osp.join(this_dir, 'configs', '%s.json' % eval_version), 'r') as f:\n data = json.load(f)\n self.eval_detection_configs = DetectionConfig.deserialize(data)\n\n self.version = version\n self.class_names = class_names\n self.modality = modality\n self.output_dir = output_dir\n\n def _evaluate_single(self,\n result_path,\n logger=None,\n metric='bbox',\n result_name='pts_bbox'):\n \"\"\"Evaluation for a single model in nuScenes protocol.\n\n Args:\n result_path (str): Path of the result file.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n metric (str): Metric name used for evaluation. 
Default: 'bbox'.\n result_name (str): Result name in the metric prefix.\n Default: 'pts_bbox'.\n\n Returns:\n dict: Dictionary of evaluation details.\n \"\"\"\n from nuscenes import NuScenes\n from nuscenes.eval.detection.evaluate import NuScenesEval\n\n output_dir = osp.join(*osp.split(result_path)[:-1])\n nusc = NuScenes(version=self.version,\n dataroot=self.data_root,\n verbose=False)\n eval_set_map = {\n 'v1.0-mini': 'mini_val',\n 'v1.0-trainval': 'val',\n }\n nusc_eval = NuScenesEval(nusc,\n config=self.eval_detection_configs,\n result_path=result_path,\n eval_set=eval_set_map[self.version],\n output_dir=output_dir,\n verbose=False)\n nusc_eval.main(render_curves=False)\n # nusc_eval.main(render_curves=True, plot_examples=40)\n\n # record metrics\n metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))\n detail = dict()\n metric_prefix = f'{result_name}_NuScenes'\n for class_name in self.class_names:\n for k, v in metrics['label_aps'][class_name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_AP_dist_{}'.format(metric_prefix, class_name,\n k)] = val\n for k, v in metrics['label_tp_errors'][class_name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_{}'.format(metric_prefix, class_name, k)] = val\n for k, v in metrics['tp_errors'].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}'.format(metric_prefix,\n self.ErrNameMapping[k])] = val\n\n detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']\n detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']\n return detail\n\n def format_results(self,\n results,\n img_metas,\n result_names=['img_bbox'],\n jsonfile_prefix=None,\n **kwargs):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n # currently the output prediction results could be in two formats\n # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)\n # 2. 
list of dict('pts_bbox' or 'img_bbox':\n # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))\n # this is a workaround to enable evaluation of both formats on nuScenes\n # refer to https://github.com/open-mmlab/mmdetection3d/issues/449\n # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict\n result_files = dict()\n # refactor this.\n for rasult_name in result_names:\n # not evaluate 2D predictions on nuScenes\n if '2d' in rasult_name:\n continue\n print(f'\\nFormating bboxes of {rasult_name}')\n tmp_file_ = osp.join(jsonfile_prefix, rasult_name)\n if self.output_dir:\n result_files.update({\n rasult_name:\n self._format_bbox(results, img_metas, self.output_dir)\n })\n else:\n result_files.update({\n rasult_name:\n self._format_bbox(results, img_metas, tmp_file_)\n })\n return result_files, tmp_dir\n\n def evaluate(\n self,\n results,\n img_metas,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n result_names=['img_bbox'],\n show=False,\n out_dir=None,\n pipeline=None,\n ):\n \"\"\"Evaluation in nuScenes protocol.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n show (bool): Whether to visualize.\n Default: False.\n out_dir (str): Path to save the visualization results.\n Default: None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict[str, float]: Results of each evaluation metric.\n \"\"\"\n result_files, tmp_dir = self.format_results(results, img_metas,\n result_names,\n jsonfile_prefix)\n if isinstance(result_files, dict):\n for name in result_names:\n print('Evaluating bboxes of {}'.format(name))\n print()\n self._evaluate_single(result_files[name])\n elif isinstance(result_files, str):\n self._evaluate_single(result_files)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n def _format_bbox(self, results, img_metas, jsonfile_prefix=None):\n \"\"\"Convert the results to the standard format.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of the output jsonfile.\n You can specify the output directory/filename by\n modifying the jsonfile_prefix. 
Default: None.\n\n Returns:\n str: Path of the output json file.\n \"\"\"\n nusc_annos = {}\n mapped_class_names = self.class_names\n\n print('Start to convert detection format...')\n\n for sample_id, det in enumerate(mmcv.track_iter_progress(results)):\n boxes, scores, labels = det\n\n order = np.argsort(scores)[::-1]\n order = order[:500]\n\n boxes = boxes[order]\n scores = scores[order]\n labels = labels[order]\n\n sample_token = img_metas[sample_id]['token']\n trans = np.array(img_metas[sample_id]['ego2global_translation'])\n rot = Quaternion(img_metas[sample_id]['ego2global_rotation'])\n annos = list()\n for i, box in enumerate(boxes):\n name = mapped_class_names[labels[i]]\n center = box[:3]\n wlh = box[[4, 3, 5]]\n box_yaw = box[6]\n box_vel = box[7:].tolist()\n box_vel.append(0)\n quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw)\n nusc_box = Box(center, wlh, quat, velocity=box_vel)\n nusc_box.rotate(rot)\n nusc_box.translate(trans)\n if np.sqrt(nusc_box.velocity[0]**2 +\n nusc_box.velocity[1]**2) > 0.2:\n if name in [\n 'car',\n 'construction_vehicle',\n 'bus',\n 'truck',\n 'trailer',\n ]:\n attr = 'vehicle.moving'\n elif name in ['bicycle', 'motorcycle']:\n attr = 'cycle.with_rider'\n else:\n attr = self.DefaultAttribute[name]\n else:\n if name in ['pedestrian']:\n attr = 'pedestrian.standing'\n elif name in ['bus']:\n attr = 'vehicle.stopped'\n else:\n attr = self.DefaultAttribute[name]\n nusc_anno = dict(\n sample_token=sample_token,\n translation=nusc_box.center.tolist(),\n size=nusc_box.wlh.tolist(),\n rotation=nusc_box.orientation.elements.tolist(),\n velocity=nusc_box.velocity[:2],\n detection_name=name,\n detection_score=float(scores[i]),\n attribute_name=attr,\n )\n annos.append(nusc_anno)\n # other views results of the same frame should be concatenated\n if sample_token in nusc_annos:\n nusc_annos[sample_token].extend(annos)\n else:\n nusc_annos[sample_token] = annos\n nusc_submissions = {\n 'meta': self.modality,\n 'results': nusc_annos,\n }\n mmcv.mkdir_or_exist(jsonfile_prefix)\n res_path = osp.join(jsonfile_prefix, 'results_nusc.json')\n print('Results writes to', res_path)\n mmcv.dump(nusc_submissions, res_path)\n return res_path" }, { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n 
bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" }, { "identifier": "all_gather_object", "path": "utils/torch_dist.py", "snippet": "def all_gather_object(obj):\n world_size = get_world_size()\n if world_size < 2:\n return [obj]\n output = [None for _ in range(world_size)]\n dist.all_gather_object(output, obj)\n return output" }, { "identifier": "synchronize", "path": "utils/torch_dist.py", "snippet": "def synchronize():\n \"\"\"Helper function to synchronize (barrier)\n among all processes when using distributed training\"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n current_world_size = dist.get_world_size()\n if current_world_size == 1:\n return\n dist.barrier()" } ]
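The get_image and get_image_sensor2ego_mats snippets above rebuild the same 4x4 block over and over: a rotation taken from a (w, x, y, z) quaternion plus a translation, with the bottom-right element set to 1. A helper capturing that repeated pattern (the repo inlines it each time; the function name here is ours):

import torch
from pyquaternion import Quaternion

def se3(rotation_wxyz, translation):
    # 4x4 homogeneous transform from a (w, x, y, z) quaternion and a translation.
    mat = torch.eye(4)
    mat[:3, :3] = torch.Tensor(Quaternion(*rotation_wxyz).rotation_matrix)
    mat[:3, 3] = torch.Tensor(translation)
    return mat

# The sweep-sensor-to-key-ego matrix in the snippets is then the composition
# global2keyego @ sweepego2global @ sweepsensor2sweepego, i.e.
# se3(q_key, t_key).inverse() @ se3(q_sweep, t_sweep) @ se3(q_calib, t_calib)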
import_statement:
from functools import partial
from pytorch_lightning.core import LightningModule
from torch.cuda.amp.autocast_mode import autocast
from torch.optim.lr_scheduler import MultiStepLR
from mmcv.runner import build_optimizer
from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn
from evaluators.det_evaluators import DetNuscEvaluator
from models.base_bev_depth import BaseBEVDepth
from utils.torch_dist import all_gather_object, synchronize
import mmcv
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
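NuscDatasetRadarDet, imported above, implements its use_cbgs option in _get_sample_indices: class-balanced grouping and sampling (CBGS) resamples frame indices so that every class ends up with roughly an equal share of an epoch. A condensed sketch of that resampling logic:

import numpy as np

def cbgs_indices(class_to_frames):
    # class_to_frames: {class_id: [indices of frames containing that class]}
    total = sum(len(v) for v in class_to_frames.values())
    target_frac = 1.0 / len(class_to_frames)         # equal share per class
    indices = []
    for frames in class_to_frames.values():
        ratio = target_frac / (len(frames) / total)  # > 1 oversamples rare classes
        indices += np.random.choice(frames, int(len(frames) * ratio)).tolist()
    return indices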
token_num: 14,786
out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir self.evaluator = DetNuscEvaluator(class_names=self.class_names, output_dir=self.default_root_dir)
# Copyright (c) Megvii Inc. All rights reserved.
pretrain_config = dict(
    img_model_path=None,
    img_load_key=[],
    img_freeze_key=None,
    pts_model_path=None,
    pts_load_key=[])

optimizer_config = dict(
    type='AdamW',
    lr=2e-4,
    weight_decay=1e-2)

H = 900
W = 1600
final_dim = (256, 704)
img_conf = dict(img_mean=[123.675, 116.28, 103.53],
                img_std=[58.395, 57.12, 57.375],
                to_rgb=True)

ida_aug_conf = {
    'resize_lim': (0.386, 0.55),
    'final_dim': final_dim,
    'rot_lim': (-5.4, 5.4),
    'H': 900,
    'W': 1600,
    'rand_flip': True,
    'bot_pct_lim': (0.0, 0.0),
    'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
             'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'],
    'Ncams': 6,
}

bda_aug_conf = {
    'rot_ratio': 1.0,
    'rot_lim': (-22.5, 22.5),
    'scale_lim': (0.95, 1.05),
    'flip_dx_ratio': 0.5,
    'flip_dy_ratio': 0.5
}

rda_aug_conf = {
    'N_sweeps': 6,
    'N_use': 5,
    'drop_ratio': 0.1,
}

backbone_img_conf = {
    'x_bound': [-51.2, 51.2, 0.8],
    'y_bound': [-51.2, 51.2, 0.8],
    'z_bound': [-5, 3, 8],
    'd_bound': [2.0, 58.0, 0.8],
    'final_dim': final_dim,
    'output_channels': 80,
    'downsample_factor': 16,
    'img_backbone_conf': dict(
        type='ResNet',
        depth=50,
        frozen_stages=0,
        out_indices=[0, 1, 2, 3],
        norm_eval=False,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet50'),
    ),
    'img_neck_conf': dict(
        type='SECONDFPN',
        in_channels=[256, 512, 1024, 2048],
        upsample_strides=[0.25, 0.5, 1, 2],
        out_channels=[128, 128, 128, 128],
    ),
    'depth_net_conf': dict(in_channels=512, mid_channels=512),
    'camera_aware': True
}
CLASSES = [
    'car',
    'truck',
    'construction_vehicle',
    'bus',
    'trailer',
    'barrier',
    'motorcycle',
    'bicycle',
    'pedestrian',
    'traffic_cone',
]
head_conf = {
    'bev_backbone_conf': dict(
        type='ResNet',
        in_channels=80,
        depth=18,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=[0, 1, 2],
        norm_eval=False,
        base_channels=160),
    'bev_neck_conf': dict(
        type='SECONDFPN',
        in_channels=[80, 160, 320, 640],
        upsample_strides=[1, 2, 4, 8],
        out_channels=[64, 64, 64, 64]),
    'tasks': [
        dict(num_class=1, class_names=['car']),
        dict(num_class=2, class_names=['truck', 'construction_vehicle']),
        dict(num_class=2, class_names=['bus', 'trailer']),
        dict(num_class=1, class_names=['barrier']),
        dict(num_class=2, class_names=['motorcycle', 'bicycle']),
        dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
    ],
    'common_heads': dict(
        reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
    'bbox_coder': dict(
        type='CenterPointBBoxCoder',
        post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
        max_num=500,
        score_threshold=0.1,
        out_size_factor=4,
        voxel_size=[0.2, 0.2, 8],
        pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
        code_size=9),
    'train_cfg': dict(
        point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
        grid_size=[512, 512, 1],
        voxel_size=[0.2, 0.2, 8],
        out_size_factor=4,
        dense_reg=1,
        gaussian_overlap=0.1,
        max_objs=500,
        min_radius=2,
        code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]),
    'test_cfg': dict(
        post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
        max_per_img=500,
        max_pool_nms=False,
        min_radius=[4, 12, 10, 1, 0.85, 0.175],
        score_threshold=0.1,
        out_size_factor=4,
        voxel_size=[0.2, 0.2, 8],
        nms_type='circle',
        pre_max_size=1000,
        post_max_size=83,
        nms_thr=0.2),
    'in_channels': 256,  # Equal to bev_neck output_channels.
    'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'),
    'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25),
    'gaussian_overlap': 0.1,
    'min_radius': 2,
}


class BEVDepthLightningModel(LightningModule):
    MODEL_NAMES = sorted(name for name in models.__dict__
                         if name.islower() and not name.startswith('__')
                         and callable(models.__dict__[name]))

    def __init__(self,
                 gpus: int = 1,
                 data_root='data/nuScenes',
                 eval_interval=1,
                 batch_size_per_device=8,
                 class_names=CLASSES,
                 backbone_img_conf=backbone_img_conf,
                 head_conf=head_conf,
                 ida_aug_conf=ida_aug_conf,
                 bda_aug_conf=bda_aug_conf,
                 rda_aug_conf=rda_aug_conf,
                 default_root_dir='./outputs/',
                 **kwargs):
        super().__init__()
        self.save_hyperparameters()
        self.gpus = gpus
        self.optimizer_config = optimizer_config
        self.pretrain_config = pretrain_config
        self.eval_interval = eval_interval
        self.batch_size_per_device = batch_size_per_device
        self.data_root = data_root
        self.class_names = class_names
        self.backbone_img_conf = backbone_img_conf
        self.head_conf = head_conf
        self.ida_aug_conf = ida_aug_conf
        self.bda_aug_conf = bda_aug_conf
        self.rda_aug_conf = rda_aug_conf
        mmcv.mkdir_or_exist(default_root_dir)
        self.default_root_dir = default_root_dir
        self.evaluator = DetNuscEvaluator(class_names=self.class_names,
                                          output_dir=self.default_root_dir)
self.model = BaseBEVDepth(self.backbone_img_conf,
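
The BEV grid settings in the configuration above are mutually consistent: grid_size equals the point-cloud extent divided by the voxel size, and 'in_channels': 256 matches the four 64-channel bev_neck outputs after SECONDFPN concatenates them. An illustrative standalone check of that arithmetic:

# Illustrative sanity check of the BEV grid settings in head_conf.
pc_range = [-51.2, -51.2, -5, 51.2, 51.2, 3]
voxel_size = [0.2, 0.2, 8]
grid = [round((pc_range[i + 3] - pc_range[i]) / voxel_size[i]) for i in range(3)]
assert grid == [512, 512, 1]          # matches train_cfg grid_size=[512, 512, 1]
assert sum([64, 64, 64, 64]) == 256   # bev_neck out_channels -> head in_channels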
3
2023-12-06 14:57:49+00:00
24k
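
Each record in this dump pairs a code prefix with the single ground-truth line that follows it in the source file, plus retrieval context and metadata (snippet index, creation timestamp, size bucket). A minimal sketch of consuming one such record for next-line prediction; the field names are assumptions inferred from the record layout, and generate is a user-supplied completion function:

# Minimal consumption sketch; field names are assumed, generate() is user-supplied.
import json

def check_record(jsonl_line: str, generate) -> bool:
    row = json.loads(jsonl_line)
    prediction = generate(row["cropped_code"])       # complete the code prefix
    return prediction.strip() == row["next_line"].strip()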
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/magic_animate/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module=False,\n motion_module_resolutions=(1, 2, 4, 8),\n motion_module_mid_block=False,\n motion_module_decoder_only=False,\n motion_module_type=None,\n motion_module_kwargs={},\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n # Addition for image embeddings\n use_image_condition=False,\n # Additional for dwpose adapter\n use_dwpose_adapter=False,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # dwpose condition\n if use_dwpose_adapter:\n self.dwpose_adapter = ControlNetConditioningEmbedding(conditioning_embedding_channels=4) # pose guider net\n else:\n self.dwpose_adapter = None\n\n self.use_image_condition = False\n if use_image_condition:\n self.use_image_condition = True\n self.image_proj_model = Resampler(\n dim=cross_attention_dim,\n depth=4,\n dim_head=64,\n heads=12,\n num_queries=16,\n embedding_dim=1024,\n output_dim=cross_attention_dim,\n ff_mult=4,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in 
enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (\n not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n 
resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n # for pose_guider\n dwpose_conditions: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2 ** self.num_upsamplers\n\n # if self.use_image_condition:\n # # project global image to 16 tokens for cross-attention\n # encoder_hidden_states = self.image_proj(encoder_hidden_states)\n # encoder_hidden_states = encoder_hidden_states.reshape(-1, 16, 768)\n # encoder_hidden_states = self.image_norm(encoder_hidden_states)\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # add pose conditions\n if dwpose_conditions is not None:\n conditions = self.dwpose_adapter(dwpose_conditions)\n sample += conditions\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb,\n encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets):]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size,\n encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise 
RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "ControlNetModel", "path": "animatediff/magic_animate/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "animatediff/magic_animate/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1,\n clip_length=8,\n is_image=False,\n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n clip_length,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks=fusion_blocks,\n batch_size=batch_size,\n is_image=is_image,\n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n clip_length,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n is_image=False,\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n # uc_mask = (\n # torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n # .to(device)\n # .bool()\n # )\n\n uc_mask = (\n torch.Tensor(\n [1] * batch_size * num_images_per_prompt * clip_length + [0] * batch_size * num_images_per_prompt * clip_length)\n .to(device)\n .bool()\n )\n\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n if not is_image:\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n\n hidden_states_uc = self.attn1(norm_hidden_states,\n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if not is_image:\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "animatediff/magic_animate/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "animatediff/magic_animate/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "animatediff/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
import einops
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from animatediff.magic_animate.unet_controlnet import UNet3DConditionModel
from animatediff.magic_animate.controlnet import ControlNetModel
from animatediff.magic_animate.mutual_self_attention import ReferenceAttentionControl
from animatediff.magic_animate.context import (
    get_context_scheduler,
    get_total_steps
)
from animatediff.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
17,193
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************

# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************

# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
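Note: the TODO list in this file marks DDIM inversion as done. For context, one deterministic DDIM inversion step (eta = 0) maps a latent toward a higher noise level using the model's own epsilon prediction; this is a textbook sketch of that update, not this pipeline's implementation:

import torch

def ddim_inversion_step(x_t, eps, alpha_bar_t, alpha_bar_next):
    # Recover the predicted clean latent, then re-noise it at the next
    # (noisier) level; alpha_bar_next < alpha_bar_t when inverting.
    pred_x0 = (x_t - (1 - alpha_bar_t) ** 0.5 * eps) / alpha_bar_t ** 0.5
    return alpha_bar_next ** 0.5 * pred_x0 + (1 - alpha_bar_next) ** 0.5 * eps

x = torch.randn(1, 4, 8, 8)        # latent at timestep t
eps = torch.randn_like(x)          # stand-in for the UNet's noise prediction
x_next = ddim_inversion_step(x, eps, torch.tensor(0.98), torch.tensor(0.95))
print(x_next.shape)  # torch.Size([1, 4, 8, 8])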
unet: UNet3DConditionModel,
0
2023-12-12 00:16:39+00:00
24k
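Note: the hacked block forwards in this record's context transfer reference statistics onto the current features (a write/read AdaIN scheme: the "write" pass banks per-block means and variances, the "read" pass re-normalizes with them). A standalone sketch of that normalization step, using the same torch.var_mean / epsilon pattern as the snippets above:

import torch

def adain_transfer(h, mean_ref, var_ref, eps=1e-6):
    # Whiten with the batch's own spatial stats, then color with the
    # reference ("write" pass) statistics, as in the hacked forwards.
    var, mean = torch.var_mean(h, dim=(2, 3), keepdim=True, correction=0)
    std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5
    std_ref = torch.maximum(var_ref, torch.zeros_like(var_ref) + eps) ** 0.5
    return ((h - mean) / std) * std_ref + mean_ref

read_feats = torch.randn(2, 8, 16, 16)
write_feats = torch.randn(2, 8, 16, 16) * 3 + 1
var_ref, mean_ref = torch.var_mean(write_feats, dim=(2, 3), keepdim=True, correction=0)
out = adain_transfer(read_feats, mean_ref, var_ref)
print(out.shape)  # torch.Size([2, 8, 16, 16])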
qitan/devops-backend-lite
common/ext_fun.py
[ { "identifier": "generate_docu", "path": "common/utils/ElasticSearchAPI.py", "snippet": "def generate_docu(table, index_version=None):\n index_name = f\"{table.name}-{index_version}\" if index_version else table.name\n _tbindex = Index(index_name)\n _tbindex.analyzer(my_normalizer)\n _tbindex.settings(number_of_shards=3, number_of_replicas=1)\n _fields = Mapping().generate_data_mapping(table)\n docu = type(index_name, (CustomDocument,), _fields)\n return _tbindex.document(docu)" }, { "identifier": "Search", "path": "common/utils/ElasticSearchAPI.py", "snippet": "class Search(BaseSearch):\n def __init__(self, prefix=False, **kwargs):\n if kwargs.get('index', None) and prefix:\n if isinstance(kwargs['index'], string_types):\n kwargs['index'] = f\"{ELASTICSEARCH_PREFIX}{kwargs['index']}\"\n elif isinstance(kwargs['index'], list):\n kwargs['index'] = [\n f\"{ELASTICSEARCH_PREFIX}{i}\" for i in kwargs['index']]\n elif isinstance(kwargs['index'], tuple):\n kwargs['index'] = tuple(\n f\"{ELASTICSEARCH_PREFIX}{i}\" for i in kwargs['index'])\n else:\n raise Exception('索引名称格式错误!')\n super(Search, self).__init__(**kwargs)" }, { "identifier": "GitLabAPI", "path": "common/utils/GitLabAPI.py", "snippet": "class GitLabAPI(object):\n def __init__(self, url, user=None, password=None, token=None, oauth=False):\n self.__url = url\n if token:\n self.__token = token\n if oauth:\n params = {'oauth_token': self.__token}\n else:\n params = {'private_token': self.__token}\n self.__gl = gitlab.Gitlab(self.__url, **params)\n else:\n self.__gl = gitlab.Gitlab(\n self.__url, http_username=user, http_password=password)\n self.__gl.auth()\n\n def get_gl(self):\n return self.__gl\n\n def list_projects(self, get_all=False, key=None, per_page=20, page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n projects = self.__gl.projects.list(**params)\n return projects\n\n def get_project(self, project_id=None, project_name_with_namespace=None):\n if any([project_id, project_name_with_namespace]) is False:\n raise Exception('缺少参数,project_id或project_name_with_namespace必选其一.')\n condition = project_id or project_name_with_namespace\n try:\n project = self.__gl.projects.get(condition)\n return project\n except BaseException as e:\n logger.info(e)\n return None\n\n def create_project(self, name, namespace_id=None, initialize_with_readme=False):\n payload = {'name': name, 'path': name,\n 'initialize_with_readme': initialize_with_readme}\n if namespace_id:\n payload['namespace_id'] = namespace_id\n try:\n ret = self.__gl.projects.create(payload)\n return True, ret\n except BaseException as e:\n logger.exception(f'创建分支请求异常,原因:{e.__dict__}')\n return False, e\n\n def get_commit(self, commit_id, project_id=None, project_name_with_namespace=None):\n try:\n commit = self.get_project(\n project_id, project_name_with_namespace).get(commit_id)\n return commit\n except BaseException as e:\n logger.info(e)\n return None\n\n def list_groups(self, get_all=False, key=None, per_page=20, page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n groups = self.__gl.groups.list(**params)\n return [{'id': i.id, 'name': i.name, 'description': i.description} for i in groups if not i.parent_id]\n\n def create_group(self, name, path=None, desc=None, parent=None):\n \"\"\"\n 创建组\n \"\"\"\n payload = {'name': name, 'path': path or name,\n 
'description': desc or ''}\n if parent:\n payload['parent_id'] = parent\n try:\n group = self.__gl.groups.create(payload)\n return True, group\n except BaseException as e:\n logger.info(e)\n return False, e\n\n def create_branch(self, project, src_branch, target_branch):\n payload = {'branch': target_branch,\n 'ref': src_branch}\n if isinstance(project, (int,)):\n project = self.get_project(project)\n try:\n ret = project.branches.create(payload)\n return True, ret\n except BaseException as e:\n logger.exception(f'创建分支请求异常,原因:{e.__dict__}')\n return False, e\n\n def list_branches(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, protected='0', *args, **kwargs):\n params = {'per_page': per_page, 'page': page}\n if not protected:\n protected = '0'\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n params.update(kwargs)\n branches = self.get_project(project_id=project_id,\n project_name_with_namespace=project_name_with_namespace).branches.list(**params)\n branches = [{'uid': f\"{G_COMMIT[0][0]}:{i.name}\", 'name': i.name, 'commit': i.commit, 'label': G_COMMIT[0][0], 'protected': i.protected}\n for i in branches]\n if protected != '0':\n # 过滤受保护分支\n _map = {'1': True, '2': False}\n branches = [i for i in branches if i['protected']\n == _map[protected]]\n return branches\n\n def list_protected_branches(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, *args, **kwargs):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n params.update(kwargs)\n branches = self.get_project(project_id=project_id,\n project_name_with_namespace=project_name_with_namespace).protectedbranches.list(**params)\n branches = [{'uid': f\"{G_COMMIT[0][0]}:{i.name}\", 'name': i.name, 'commit': i.commit, 'label': G_COMMIT[0][0], 'protected': i.protected}\n for i in branches]\n return branches\n\n def list_tags(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n tags = self.get_project(\n project_id, project_name_with_namespace).tags.list(**params)\n tags = [{'uid': f\"{G_COMMIT[1][0]}:{i.name}\", 'name': i.name, 'message': i.message, 'commit': i.commit,\n 'label': G_COMMIT[1][0]} for i in tags]\n return tags\n\n def list_commits(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, ref_name=None, since=None):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n if ref_name:\n params['ref_name'] = ref_name\n if since:\n params['since'] = since\n commits = self.get_project(\n project_id, project_name_with_namespace).commits.list(**params)\n commits = [\n {'title': i.title, 'short_id': i.short_id, 'author_name': i.author_name, 'committer_name': i.committer_name,\n 'committed_date': i.committed_date, 'message': i.message, 'web_url': i.web_url} for i in commits]\n return commits\n\n def repo_checkout(self, repo):\n import subprocess\n git_url = repo.split('//')\n subprocess.call(\n ['git', 'clone', f\"{git_url[0]}//oauth2:{self.__token}@{git_url[1]}\"])\n\n def get_user_id(self, username):\n user_list = 
self.__gl.users.list(username=username)\n if user_list:\n return user_list[0].id\n else:\n return None\n\n def get_project_from_name(self, project_name):\n projects = self.__gl.projects.list(search=project_name)\n for p in projects:\n if p.name == project_name:\n return p\n return None\n\n def add_project_member(self, project, user_id, access_level):\n try:\n project.members.create(\n {'user_id': user_id, 'access_level': access_level})\n return True, '成功'\n except Exception as error:\n return False, error\n\n def del_project_member(self, project, user_id):\n try:\n project.members.delete(user_id)\n return True, '成功'\n except Exception as error:\n return False, error" }, { "identifier": "HarborAPI", "path": "common/utils/HarborAPI.py", "snippet": "class HarborAPI(object):\n def __init__(self, url, username, password):\n self.__url = url.rstrip('/')\n self.__user = username\n self.__password = password\n self.__token = base64.b64encode(\n bytes('%s:%s' % (self.__user, self.__password), encoding='utf-8'))\n self.__headers = dict()\n self.__headers[\"Accept\"] = \"application/json\"\n self.__headers['authorization'] = 'Basic %s' % str(\n self.__token, encoding='utf-8')\n\n def request(self, method, obj=None, prefix='/'):\n try:\n if method == 'get':\n req = requests.request(method, '%s%s' % (self.__url, prefix), params=obj, headers=self.__headers,\n verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.json(), 'count': req.headers.get('X-Total-Count', None),\n 'next': req.headers.get('Link', None)}\n if method == 'delete':\n req = requests.request(method, '%s%s' % (\n self.__url, prefix), headers=self.__headers, verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n if method in ['put', 'post']:\n req = requests.request(method, '%s%s' % (self.__url, prefix), json=obj, headers=self.__headers,\n verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n if method == 'head':\n req = requests.request(method, '%s%s' % (\n self.__url, prefix), headers=self.__headers, verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n except BaseException as e:\n raise e\n return res\n\n def systeminfo(self):\n res = self.request('get', prefix='/systeminfo')\n return res\n\n def get_users(self):\n res = self.request('get', prefix='/users')\n return res\n\n def get_projects(self, project_name=None, page=1, page_size=20):\n \"\"\"\n :project_name: The name of project\n :page: default is 1.\n :page_size: default is 10, maximum is 100.\n \"\"\"\n params = {'page': page, 'page_size': page_size}\n if project_name:\n params['name'] = project_name\n try:\n res = self.request('get', params, prefix='/projects')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def get_repositories(self, project_id, page=1, page_size=20, repo=None):\n params = {'project_id': project_id,\n 'page': page, 'page_size': page_size}\n if repo:\n params['q'] = repo\n try:\n res = self.request('get', params, '/repositories')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def get_tags(self, repo):\n 
try:\n res = self.request('get', prefix='/repositories/%s/tags' % repo)\n tags = [\n {'name': i['name'], 'created': i['created'], 'push_time': i.get(\n 'push_time', None), 'size': i['size']}\n for i in\n res['data']]\n tags.sort(key=lambda k: (k.get('created')), reverse=True)\n return {'ecode': 200, 'data': tags, 'count': len(tags)}\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def fetch_project(self, project_id):\n \"\"\"\n 获取项目信息\n \"\"\"\n try:\n res = self.request(\n 'get', {'project_id': project_id}, prefix=f'/projects/{project_id}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def fetch_tag(self, repo, tag):\n \"\"\"\n 获取指定镜像标签\n \"\"\"\n try:\n res = self.request(\n 'get', prefix=f'/repositories/{repo}/tags/{tag}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def create_project(self, project_name, public=True):\n \"\"\"\n 创建仓库项目\n \"\"\"\n try:\n data = {'project_name': project_name, 'metadata': {\n 'public': 'true' if public else 'false'}}\n res = self.request('post', obj=data, prefix='/projects')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def update_project(self, project_id, *args, **kwargs):\n \"\"\"\n 更新仓库项目\n \"\"\"\n try:\n res = self.request('put', obj=kwargs,\n prefix=f'/projects/{project_id}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def project_exists(self, project_name):\n \"\"\"\n 查询项目是否存在\n \"\"\"\n try:\n res = self.request(\n 'head', prefix=f'/projects?project_name={project_name}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def patch_tag(self, repo, src_image, tag_name):\n \"\"\"\n 镜像打标签\n \"\"\"\n try:\n try:\n # 创建仓库项目\n res = self.create_project(repo.split('/')[0])\n except BaseException as e:\n pass\n data = {'tag': tag_name, 'src_image': src_image, 'override': True}\n res = self.request(\n 'post', obj=data, prefix='/repositories/%s/tags' % repo)\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def delete_tag(self, repo, tag):\n \"\"\"\n 删除标签\n \"\"\"\n try:\n res = self.request(\n 'delete', prefix=f'/repositories/{repo}/tags/{tag}')\n return res\n except BaseException as e:\n logger.ex\n return {'ecode': 500, 'message': e}\n\n def search(self, query):\n \"\"\"\n 搜索\n \"\"\"\n try:\n res = self.request('get', {'q': query}, prefix='/search')\n return res\n except BaseException as e:\n logger.exception(e)\n return {'ecode': 500, 'message': e}" }, { "identifier": "GlueJenkins", "path": "common/utils/JenkinsAPI.py", "snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n '''Raise an exception 
if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' % name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise 
JenkinsException('credential[%s] already exists.' % name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, 
short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)" }, { "identifier": "convert_xml_to_str_with_pipeline", "path": "common/custom_format.py", "snippet": "def convert_xml_to_str_with_pipeline(xml, url, secret, desc, jenkinsfile, scm=True):\n \"\"\"\n scm\n True: jenkinsfile为指定的git地址\n False: jenkinsfile为具体的pipeline\n \"\"\"\n xml_dict = xmltodict.parse(xml)\n if scm:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsScmFlowDefinition'\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'url'] = url\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'credentialsId'] = secret\n xml_dict['flow-definition']['definition']['scriptPath'] = jenkinsfile\n else:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition'\n xml_dict['flow-definition']['definition']['script'] = jenkinsfile\n xml_dict['flow-definition']['definition']['sandbox'] = 'true'\n xml_dict['flow-definition']['description'] = desc\n result = xmltodict.unparse(\n xml_dict, short_empty_elements=True, pretty=True)\n return result" }, { "identifier": "DASHBOARD_TIME_FORMAT", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FORMAT = {'year_only': '%Y', 'years': '%Y-%m', 'months': '%Y-%m-%d', 'days': '%Y-%m-%d 
%H:00:00',\n 'hours': '%Y-%m-%d %H:%M:00', 'minutes': '%Y-%m-%d %H:%M:%S'}" }, { "identifier": "DASHBOARD_TIME_FORMAT_T", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FORMAT_T = {'years': '%Y', 'months': '%Y-%m', 'days': '%Y-%m-%d', 'hours': \"%Y-%m-%d %H:00:00\",\n 'minutes': \"%Y-%m-%d %H:%M:00\", 'seconds': \"%Y-%m-%d %H:%M:%S\"}" }, { "identifier": "DASHBOARD_TIME_FREQNAMES", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FREQNAMES = {'year_only': YEARLY, 'years': MONTHLY, 'months': DAILY, 'days': HOURLY, 'hours': MINUTELY,\n 'minutes': SECONDLY}" }, { "identifier": "DASHBOARD_TIME_FREQNAMES_T", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FREQNAMES_T = {'years': YEARLY, 'months': MONTHLY, 'days': DAILY, 'hours': HOURLY, 'minutes': MINUTELY,\n 'seconds': SECONDLY}" }, { "identifier": "SENSITIVE_KEYS", "path": "common/variables.py", "snippet": "SENSITIVE_KEYS = ['password', 'token', 'access',\n 'refresh', 'AUTHORIZATION', 'COOKIE']" }, { "identifier": "JENKINS_CALLBACK_KEY", "path": "common/variables.py", "snippet": "JENKINS_CALLBACK_KEY = 'jenkins_callback_flag::'" }, { "identifier": "JENKINS_STATUS_MAP", "path": "common/variables.py", "snippet": "JENKINS_STATUS_MAP = {'IN_PROGRESS': 3, 'SUCCESS': 1, 'FAILED': 2, 'ABORTED': 4, 'FAILURE': 2, 'NOT_EXECUTED': 5,\n 'NOT_EXEC_TIMEOUT': 5}" }, { "identifier": "DEV_LANGUAGE_KEY", "path": "common/variables.py", "snippet": "DEV_LANGUAGE_KEY = 'devlanguage:'" }, { "identifier": "AppInfo", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "K8sAPI", "path": "common/utils/K8sAPI.py", "snippet": "class K8sAPI(object):\n def __init__(self, host=None, username=None, password=None, api_key=None, api_key_prefix='Bearer', verify_ssl=False,\n k8s_config=None,\n config_file=None, eks=None):\n \"\"\"\n elk: aws kubernetes\n \"\"\"\n self.__host = host\n self.__username = username\n self.__password = password\n self.__api_key = api_key\n self.__api_key_prefix = api_key_prefix\n self.__verify_ssl = verify_ssl\n if k8s_config is not None:\n config.kube_config.load_kube_config_from_dict(k8s_config)\n self.__client0 = client.CoreApi()\n self.__client = client.CoreV1Api()\n elif config_file is not None:\n config.kube_config.load_kube_config(config_file=config_file)\n self.__client0 = client.CoreApi()\n self.__client = client.CoreV1Api()\n elif self.__host:\n if self.__username and self.__password:\n self.__client = self.get_api()\n else:\n raise Exception('Please input username/password or api_key')\n else:\n raise Exception('Cannot find k8s config')\n self.client = self.__client\n\n def get_token(self):\n pass\n\n def get_api(self):\n configuration = client.Configuration()\n configuration.host = self.__host\n if self.__verify_ssl is False:\n configuration.verify_ssl = False\n configuration.username = self.__username\n configuration.password = self.__password\n basic_auth_token = configuration.get_basic_auth_token()\n api = core_v1_api.CoreV1Api(api_client.ApiClient(configuration=configuration, header_name=\"authorization\",\n header_value=basic_auth_token))\n return api\n\n def get_client(self):\n return self.__client\n\n def set_client(self, obj):\n self.__client = getattr(client, obj)()\n\n def get_apis(self):\n print(\"Supported APIs (* is preferred version):\")\n self.__client2 = client.ApisApi(self.__client0.api_client)\n for api in self.__client2.get_api_versions().groups:\n versions = []\n for v in api.versions:\n name = \"\"\n if v.version == api.preferred_version.version and len(\n api.versions) > 1:\n name 
+= \"*\"\n name += v.version\n versions.append(name)\n\n def get_nodes(self, **kwargs):\n ret = self.__client.list_node(**kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def get_node_info(self, name):\n ret = self.__client.read_node_status(name)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def get_namespaces(self, **kwargs):\n ret = self.__client.list_namespace(**kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def create_namespace(self, name):\n payload = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Namespace\",\n \"metadata\": {\n \"name\": name,\n }\n }\n ret = self.__client.create_namespace(body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n print(rs)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def get_services(self, namespace='default', **kwargs):\n ret = self.__client.list_namespaced_service(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def fetch_service(self, name, namespace='default', api_version='apps/v1'):\n try:\n ret = self.__client.read_namespaced_service(name, namespace)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n print('reason', e.reason)\n return {'ecode': e.status, 'message': e.body}\n except BaseException as e:\n print('reason', e.reason)\n return {'ecode': e.status, 'message': e.body}\n\n def create_namespace_service(self, name, app=None, targets=list, namespace='default', service_type='NodePort',\n svc_yaml=None):\n \"\"\"\n 目前只支持NodePort类型,对外服务端口随机生成(如手动生成,需配置node_port和endpoints)\n :param name: service name\n :param app: app name\n :param targets: [{port, target_port, protocol, node_port}]\n :param namespace:\n :param service_type:\n :return:\n \"\"\"\n ports = []\n if svc_yaml:\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n else:\n for index, target in enumerate(targets):\n port_body = {'name': f\"{name}-{index}\", 'port': target['port'], 'target_port': target['port'],\n 'protocol': target['protocol']}\n if target['node_port'] > 30000:\n port_body['node_port'] = target['node_port']\n ports.append(client.V1ServicePort(**port_body))\n body = client.V1Service(\n api_version=\"v1\",\n kind=\"Service\",\n metadata=client.V1ObjectMeta(\n name=name\n ),\n spec=client.V1ServiceSpec(\n selector={\"app\": app},\n type=service_type,\n ports=ports\n )\n )\n try:\n ret = self.__client.create_namespaced_service(namespace=namespace, body=body,\n **{'_return_http_data_only': False})\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n logger.error('reason', e)\n return {'error': True, 'message': str(e)}\n except ApiException as e:\n if e.status == 409:\n logger.error('reason', e.reason)\n return {'error': True, 'ecode': e.status, 'message': e.body}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def update_namespace_service(self, name, app=None, targets=Type[list], namespace='default', service_type='NodePort',\n svc_yaml=None):\n ports = []\n if svc_yaml:\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n 
logger.debug(f'svc_yaml body == {body}')\n func = self.__client.replace_namespaced_service\n else:\n for index, target in enumerate(targets):\n port_body = {'name': target['name'], 'port': target['port'], 'target_port': target['port'],\n 'protocol': target['protocol']}\n if target['node_port'] > 30000:\n port_body['node_port'] = target['node_port']\n ports.append(client.V1ServicePort(**port_body))\n body = client.V1Service(\n api_version=\"v1\",\n kind=\"Service\",\n metadata=client.V1ObjectMeta(\n name=name\n ),\n spec=client.V1ServiceSpec(\n selector={\"app\": name},\n type=service_type,\n ports=ports\n )\n )\n func = self.__client.patch_namespaced_service\n try:\n ret = func(\n name, namespace, body=body,\n **{'_return_http_data_only': False}\n )\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n logger.error(f'ApiClient sanitize_for_serialization 异常: {e}', )\n return {'error': True, 'message': str(e)}\n except ApiException as e:\n if e.status == 409:\n logger.error(f'ApiException 异常 409 资源冲突: {e} {e.reason}', )\n return {'error': True, 'ecode': e.status, 'message': e.body}\n except BaseException as e:\n logger.error(f'patch_namespaced_service 异常: {e}', )\n return {'error': True, 'message': str(e)}\n\n def delete_namespace_service(self, name, namespace='default', api_version='apps/v1'):\n try:\n ret = self.__client.delete_namespaced_service(name, namespace)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_configmaps(self, namespace='default', **kwargs):\n ret = self.__client.list_namespaced_config_map(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_configmap(self, name, namespace='default', **kwargs):\n \"\"\"\n get configmap content\n \"\"\"\n try:\n ret = self.__client.read_namespaced_config_map(\n name, namespace, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def create_namespace_configmap(self, svc_yaml, namespace='default', **kwargs):\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n try:\n ret = self.__client.create_namespaced_config_map(\n namespace, body, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def update_namespace_configmap(self, name, svc_yaml, namespace='default', **kwargs):\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n try:\n ret = self.__client.patch_namespaced_config_map(\n name, namespace, body, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def delete_namespace_configmap(self, name, namespace='default', api_version='apps/v1'):\n try:\n ret = self.__client.delete_namespaced_config_map(name, namespace)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_namespace_deployment(self, namespace='default', api_version='apps/v1', **kwargs):\n self.__client2 = 
operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n ret = self.__client2.list_namespaced_deployment(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def create_namespace_deployment(self, name, image=None, port=list, replicas=1, deploy_yaml=None,\n pod_type='Deployment', namespace='default'):\n \"\"\"\n\n :param name:\n :param image:\n :param port: [{containerPort: 8080, protocol: 'TCP'}]\n :param replicas:\n :param pod_type:\n :param namespace:\n :return:\n \"\"\"\n payload = {'kind': pod_type, 'spec': {'replicas': replicas, 'template': {\n 'spec': {'containers': [{'image': image, 'name': name, 'ports': port}]},\n 'metadata': {'labels': {'app': name}}},\n 'selector': {'matchLabels': {'app': name}}},\n 'apiVersion': 'apps/v1beta2',\n 'metadata': {'labels': {'app': name}, 'namespace': namespace,\n 'name': name}}\n if deploy_yaml is not None:\n payload = yaml.safe_load(deploy_yaml)\n payload['metadata'].pop('resourceVersion', None)\n self.__client2 = operator.methodcaller(\n ''.join([i.capitalize() for i in payload.get(\n 'apiVersion', 'apps/v1beta2').split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.create_namespaced_deployment(\n namespace=namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def delete_namespace_deployment(self, name, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n ret = self.__client2.delete_namespaced_deployment(name, namespace,\n body=client.V1DeleteOptions(grace_period_seconds=0,\n propagation_policy='Foreground'))\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def update_deployment(self, name, replicas=None, image=None, envs=None, deploy_yaml=None, namespace='default',\n api_version='apps/v1', force=False):\n \"\"\"\n force: 强制更新\n \"\"\"\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n payload = {'spec': {'replicas': replicas, 'template': {}}}\n if replicas is None and image is None and deploy_yaml is None:\n return {'err': '缺少参数'}\n if replicas is not None:\n payload['spec']['replicas'] = replicas\n if image is not None:\n payload['spec']['template'] = {\n 'spec': {'containers': [{'image': image, 'name': name}]}}\n\n if envs is not None:\n payload['spec']['template'] = {\n 'spec': {'containers': [{'env': envs}]}}\n\n if deploy_yaml is not None:\n payload = yaml.safe_load(deploy_yaml)\n payload['metadata'].pop('resourceVersion', None)\n try:\n if force:\n ret = self.__client2.replace_namespaced_deployment(\n name, namespace, body=payload)\n else:\n ret = self.__client2.patch_namespaced_deployment(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def update_deployment_replica(self, name, 
replicas, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n payload = {'spec': {'replicas': replicas}}\n ret = self.__client2.patch_namespaced_deployment_scale(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def update_deployment_image(self, name, image, namespace='default', api_version='apps/v1'):\n deploy = self.fetch_deployment(name, namespace)\n if deploy.get('ecode', 200) > 399:\n return deploy\n payload = {'spec': deploy['message']['spec']}\n payload['spec']['template']['spec']['containers'][0]['image'] = image\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.patch_namespaced_deployment(name, namespace, body=payload,\n **{'_return_http_data_only': False})\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def update_deployment_resource(self, name, envs, image_policy, namespace='default', api_version='apps/v1',\n **kwargs):\n payload = {'spec': {'template': {'spec': {'containers': [\n {'name': name, 'env': envs, 'imagePullPolicy': image_policy, 'resources': kwargs['resources']}]}}}}\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n ret = self.__client2.patch_namespaced_deployment(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def restart_deployment(self, name, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n payload = {\n 'spec': {\n 'template': {\n 'spec': {\n 'containers': [\n {\n 'name': name,\n 'env': [\n {\n 'name': 'RESTART_',\n 'value': datetime.now().strftime('%Y%m%d%H%M%S')\n }\n ]\n }\n ]\n }\n }\n }\n }\n\n ret = self.__client2.patch_namespaced_deployment(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def fetch_deployment(self, name, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.read_namespaced_deployment(name, namespace)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def get_replica(self, namespace='default', api_version='apps/v1', **kwargs):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.list_namespaced_replica_set(\n namespace=namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n 
except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def get_pods(self, namespace=None, **kwargs):\n if namespace is None:\n return {}\n try:\n ret = self.__client.list_namespaced_pod(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def fetch_pod(self, name, namespace='default'):\n try:\n ret = self.__client.read_namespaced_pod(\n name=name, namespace=namespace)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def get_secrets(self, namespace='default', **kwargs):\n ret = self.__client.list_namespaced_secret(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_secret(self, name, namespace='default', **kwargs):\n \"\"\"\n get secret content\n \"\"\"\n ret = self.__client.read_namespaced_secret(name, namespace, **kwargs)\n try:\n ret = self.__client.read_namespaced_secret(\n name, namespace, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def manage_secret(self, name, namespace='default', api_version='v1', **kwargs):\n payload = kwargs.pop('payload', {})\n body = kubernetes.client.V1Secret(api_version=api_version, **payload)\n ret = {}\n try:\n ret = self.__client.replace_namespaced_secret(\n name, namespace, body, **kwargs)\n except ApiException as e:\n if e.status == 404:\n ret = self.__client.create_namespaced_secret(namespace, body)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}" } ]
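Note: K8sAPI.restart_deployment above forces a rolling restart by patching a throwaway env var into the pod template, the same effect as kubectl rollout restart. The patch body it builds, shown standalone (the container name 'web' is just an example):

from datetime import datetime

def build_restart_patch(container_name: str) -> dict:
    # Changing anything under spec.template triggers a new ReplicaSet
    # rollout; a timestamped dummy env var is a cheap way to do it.
    return {
        'spec': {
            'template': {
                'spec': {
                    'containers': [{
                        'name': container_name,
                        'env': [{
                            'name': 'RESTART_',
                            'value': datetime.now().strftime('%Y%m%d%H%M%S'),
                        }],
                    }]
                }
            }
        }
    }

patch = build_restart_patch('web')
# e.g. AppsV1Api().patch_namespaced_deployment(name, namespace, body=patch)
print(patch['spec']['template']['spec']['containers'][0]['env'][0]['name'])  # RESTART_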
from gitlab.exceptions import GitlabGetError
from functools import reduce
from common.utils.ElasticSearchAPI import generate_docu, Search
from common.utils.GitLabAPI import GitLabAPI
from common.utils.HarborAPI import HarborAPI
from common.utils.JenkinsAPI import GlueJenkins
from common.custom_format import convert_xml_to_str_with_pipeline
from common.variables import DASHBOARD_TIME_FORMAT, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FREQNAMES, \
    DASHBOARD_TIME_FREQNAMES_T, SENSITIVE_KEYS, JENKINS_CALLBACK_KEY, \
    JENKINS_STATUS_MAP, DEV_LANGUAGE_KEY
from dbapp.models import AppInfo, Product, KubernetesCluster, KubernetesDeploy, MicroApp, Project, ProjectConfig, DevLanguage, BuildJob, UserProfile, SystemConfig, Role, Permission, Menu, DataDict
from django.conf import settings
from django.core.cache import cache
from django.utils import timezone
from django.db.models import Q
from social_django.utils import load_strategy
from rest_framework.utils.serializer_helpers import ReturnDict
from config import SOCIAL_AUTH_GITLAB_API_URL, GITLAB_ADMIN_TOKEN
from common.utils.K8sAPI import K8sAPI
from urllib.parse import urlparse, quote_plus
from dateutil.relativedelta import relativedelta
from dateutil.rrule import rrule
from ruamel import yaml
from datetime import datetime, timedelta
from celery import current_app
import copy
import operator
import re
import time
import pytz
import os
import json
import requests
import math
import shortuuid
import logging
15,168
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Charles Lai
@Contact : [email protected]
@Time : 2020/12/21 10:00 AM
@FileName: ext_fun.py
@Blog :https://imaojia.com
"""

logger = logging.getLogger('drf')


class ThirdPartyUser(object):

    def get_user(self):
        user = UserProfile.objects.get_or_create(username='thirdparty')[0]
        self.set_permission(user, self.get_role())
        return user

    def get_role(self):
        return Role.objects.get_or_create(name='thirdparty')[0]

    def get_perm(self):
        return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0]

    def set_permission(self, user, role):
        role.permissions.set([self.get_perm().id])
        user.roles.set([role.id])


def set_redis_data(name, config):
    cache.set(f"system:{name}", config, None)


def get_redis_data(name):
    ret = cache.get(f"system:{name}")
    if not ret:
        try:
            if name == 'cicd-harbor':
                qs = SystemConfig.objects.filter(type=name)[0]
            else:
                qs = SystemConfig.objects.get(name=name)
        except BaseException as e:
            return None
        ret = json.loads(qs.config)
        set_redis_data(name, ret)
    return ret


def get_datadict(name, config=0, default_value=None):
    """
    Fetch data from the data dictionary.
    """
    try:
        qs = DataDict.objects.get(key=name)
    except BaseException as e:
        return default_value
    if config:
        ret = json.loads(qs.extra)
    else:
        ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc}
    return ret


def check_pods(cluster_id, k8s_config, namespace, **kwargs):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Charles Lai
@Contact : [email protected]
@Time : 2020/12/21 10:00 AM
@FileName: ext_fun.py
@Blog :https://imaojia.com
"""

logger = logging.getLogger('drf')


class ThirdPartyUser(object):

    def get_user(self):
        user = UserProfile.objects.get_or_create(username='thirdparty')[0]
        self.set_permission(user, self.get_role())
        return user

    def get_role(self):
        return Role.objects.get_or_create(name='thirdparty')[0]

    def get_perm(self):
        return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0]

    def set_permission(self, user, role):
        role.permissions.set([self.get_perm().id])
        user.roles.set([role.id])


def set_redis_data(name, config):
    cache.set(f"system:{name}", config, None)


def get_redis_data(name):
    ret = cache.get(f"system:{name}")
    if not ret:
        try:
            if name == 'cicd-harbor':
                qs = SystemConfig.objects.filter(type=name)[0]
            else:
                qs = SystemConfig.objects.get(name=name)
        except BaseException as e:
            return None
        ret = json.loads(qs.config)
        set_redis_data(name, ret)
    return ret


def get_datadict(name, config=0, default_value=None):
    """
    Fetch data from the data dictionary.
    """
    try:
        qs = DataDict.objects.get(key=name)
    except BaseException as e:
        return default_value
    if config:
        ret = json.loads(qs.extra)
    else:
        ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc}
    return ret


def check_pods(cluster_id, k8s_config, namespace, **kwargs):
k8s = KubernetesCluster.objects.get(id=cluster_id)
14
2023-12-13 03:09:32+00:00
24k
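A quick aside on the record above: the ext_fun.py sample defines Redis-backed config caching (set_redis_data/get_redis_data) and a data-dictionary lookup (get_datadict). A minimal usage sketch follows; the import path, the 'jenkins' config name, and the 'url' field are illustrative assumptions, not part of the record.

# Hypothetical usage of the helpers from the ext_fun.py sample above.
# Assumed import path; adjust to wherever ext_fun.py lives in the project.
from common.ext_fun import get_redis_data, get_datadict

# Returns the dict parsed from SystemConfig.config, priming the cache under
# "system:jenkins" with no expiry; returns None if neither cache nor DB has it.
jenkins_cfg = get_redis_data('jenkins')
if jenkins_cfg:
    print(jenkins_cfg.get('url'))  # 'url' is an assumed config field

# config=0 returns the row fields as a dict; config=1 parses the JSON 'extra' column.
languages = get_datadict('DEV_LANGUAGE', config=1, default_value=[])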
MarilynKeller/aitviewer-skel
aitviewer/renderables/sdf.py
[ { "identifier": "BoundingBoxes", "path": "aitviewer/renderables/bounding_boxes.py", "snippet": "class BoundingBoxes(Node):\n \"\"\"\n Draw bounding boxes.\n \"\"\"\n\n def __init__(self, vertices, thickness=0.005, color=(0.0, 0.0, 1.0, 1.0), **kwargs):\n \"\"\"\n Initializer.\n :param vertices: Set of 3D coordinates as a np array of shape (N, 8, 3). The vertices will be connected in the\n following way: 0-1-2-3-0 (bottom) 4-5-6-7-4 (top) 0-4 1-5 2-6 3-7 (vertical connections between bottom\n and top).\n :param thickness: Line thickness.\n :param color: Color of the lines.\n \"\"\"\n if not isinstance(vertices, np.ndarray):\n vertices = np.array(vertices)\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n else:\n assert len(vertices.shape) == 3\n assert vertices.shape[1] == 8\n super(BoundingBoxes, self).__init__(n_frames=len(vertices), color=color, **kwargs)\n\n self.vertices = vertices\n\n self.lines = Lines(\n lines=self._get_line_coords(),\n mode=\"lines\",\n r_base=thickness,\n color=self.color,\n cast_shadow=False,\n )\n self.spheres = Spheres(positions=self.vertices, radius=thickness, color=self.color, cast_shadow=False)\n self._add_nodes(self.lines, self.spheres, show_in_hierarchy=False)\n\n @property\n def bounds(self):\n return self.get_bounds(self.vertices)\n\n @property\n def current_bounds(self):\n return self.get_bounds(self.vertices[self.current_frame_id])\n\n @staticmethod\n def from_min_max_diagonal(v_min, v_max, **kwargs):\n \"\"\"\n Create an axis-aligned bounding box from the 3D diagonal.\n :param v_min: np array of shape (N, 3).\n :param v_max: np array of shape (N, 3).\n :return: BoundingBoxes corresponding to the given diagonals.\n \"\"\"\n vertices = np.zeros((v_min.shape[0], 8, 3), dtype=v_min.dtype)\n vertices[:, 0:4] = v_min[:, np.newaxis]\n vertices[:, 1, 0] = v_max[:, 0]\n vertices[:, 2, 0:2] = v_max[:, 0:2]\n vertices[:, 3, 1] = v_max[:, 1]\n\n vertices[:, 4:] = v_max[:, np.newaxis]\n vertices[:, 4, 0:2] = v_min[:, 0:2]\n vertices[:, 7, 0] = v_min[:, 0]\n vertices[:, 5, 1] = v_min[:, 1]\n\n return BoundingBoxes(vertices, **kwargs)\n\n def _get_line_coords(self):\n lines = np.zeros((self.n_frames, 12 * 2, 3), dtype=self.vertices.dtype)\n\n # Bottom 0-1-2-3-0.\n lines[:, 0:2] = self.vertices[:, 0:2]\n lines[:, 2:4] = self.vertices[:, 1:3]\n lines[:, 4:6] = self.vertices[:, 2:4]\n lines[:, 6:8] = self.vertices[:, [3, 0]]\n\n # Top 4-5-6-7-4.\n lines[:, 8:10] = self.vertices[:, 4:6]\n lines[:, 10:12] = self.vertices[:, 5:7]\n lines[:, 12:14] = self.vertices[:, 6:8]\n lines[:, 14:16] = self.vertices[:, [7, 4]]\n\n # Vertical Connections.\n lines[:, 16:18] = self.vertices[:, [0, 4]]\n lines[:, 18:20] = self.vertices[:, [1, 5]]\n lines[:, 20:22] = self.vertices[:, [2, 6]]\n lines[:, 22:24] = self.vertices[:, [3, 7]]\n\n return lines\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.lines.color = color\n self.spheres.color = color" }, { "identifier": "Lines", "path": "aitviewer/renderables/lines.py", "snippet": "class Lines(Node):\n \"\"\"Render lines as cylinders or cones. Can render approx. 600k lines at 40 fps.\"\"\"\n\n def __init__(\n self,\n lines,\n r_base=0.01,\n r_tip=None,\n color=(0.0, 0.0, 1.0, 1.0),\n mode=\"line_strip\",\n cast_shadow=True,\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param lines: Set of 3D coordinates as a np array of shape (F, L, 3) or (L, 3).\n :param r_base: Thickness of the line.\n :param r_tip: If set, the thickness of the line will taper from r_base to r_tip. 
If set to 0.0 it will create\n a proper cone.\n :param color: Color of the line (4-tuple) or array of color (N_LINES, 4), one for each line.\n :param mode: 'lines' or 'line_strip'.\n 'lines': a line is drawn from point 0 to 1, from 2 to 3, and so on, number of lines is L / 2.\n 'line_strip': a line is drawn between all adjacent points, 0 to 1, 1 to 2 and so on, number of lines is L - 1.\n :param cast_shadow: If True the mesh casts a shadow on other objects.\n \"\"\"\n if len(lines.shape) == 2:\n lines = lines[np.newaxis]\n assert len(lines.shape) == 3\n assert mode == \"lines\" or mode == \"line_strip\"\n if mode == \"lines\":\n assert lines.shape[1] % 2 == 0\n\n self._lines = lines\n self.mode = mode\n self.r_base = r_base\n self.r_tip = r_tip if r_tip is not None else r_base\n\n self.vertices, self.faces = self.get_mesh()\n self.n_lines = self.lines.shape[1] // 2 if mode == \"lines\" else self.lines.shape[1] - 1\n\n # Define a default material in case there is None.\n if isinstance(color, tuple) or len(color.shape) == 1:\n kwargs[\"material\"] = kwargs.get(\"material\", Material(color=color, ambient=0.2))\n self.line_colors = kwargs[\"material\"].color\n else:\n assert (\n color.shape[1] == 4 and color.shape[0] == self.n_lines\n ), \"Color must be a tuple of 4 values or a numpy array of shape (N_LINES, 4)\"\n self.line_colors = color\n\n super(Lines, self).__init__(n_frames=self.lines.shape[0], **kwargs)\n\n self._need_upload = True\n self.draw_edges = False\n\n # Render passes.\n self.outline = True\n self.fragmap = True\n self.depth_prepass = True\n self.cast_shadow = cast_shadow\n\n @property\n def bounds(self):\n bounds = self.get_bounds(self.lines)\n r = max(self.r_base, self.r_tip)\n bounds[:, 0] -= r\n bounds[:, 1] += r\n return bounds\n\n @property\n def current_bounds(self):\n bounds = self.get_bounds(self.current_lines)\n r = max(self.r_base, self.r_tip)\n bounds[:, 0] -= r\n bounds[:, 1] += r\n return bounds\n\n @property\n def lines(self):\n return self._lines\n\n @lines.setter\n def lines(self, value):\n self._lines = value if len(value.shape) == 3 else value[np.newaxis]\n self.n_frames = self.lines.shape[0]\n self.redraw()\n\n @property\n def current_lines(self):\n idx = self.current_frame_id if self._lines.shape[0] > 1 else 0\n return self._lines[idx]\n\n @current_lines.setter\n def current_lines(self, lines):\n assert len(lines.shape) == 2\n idx = self.current_frame_id if self._lines.shape[0] > 1 else 0\n self._lines[idx] = lines\n self.redraw()\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.line_colors = color\n self.redraw()\n\n @property\n def line_colors(self):\n if len(self._line_colors.shape) == 1:\n t = np.tile(np.array(self._line_colors), (self.n_lines, 1))\n return t\n else:\n return self._line_colors\n\n @line_colors.setter\n def line_colors(self, color):\n if isinstance(color, tuple):\n color = np.array(color)\n self._line_colors = color\n self.redraw()\n\n def on_frame_update(self):\n self.redraw()\n\n def redraw(self, **kwargs):\n self._need_upload = True\n\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n self.prog = get_lines_instanced_program()\n\n vs_path = \"lines_instanced_positions.vs.glsl\"\n self.outline_program = get_outline_program(vs_path)\n self.depth_only_program = get_depth_only_program(vs_path)\n self.fragmap_program = get_fragmap_program(vs_path)\n\n self.vbo_vertices = ctx.buffer(self.vertices.astype(\"f4\").tobytes())\n self.vbo_indices = 
ctx.buffer(self.faces.astype(\"i4\").tobytes())\n self.vbo_instance_base = ctx.buffer(reserve=self.n_lines * 12)\n self.vbo_instance_tip = ctx.buffer(reserve=self.n_lines * 12)\n self.vbo_instance_color = ctx.buffer(reserve=self.n_lines * 16)\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_instance_base, \"3f4/i\", \"instance_base\")\n self.vao.buffer(self.vbo_instance_tip, \"3f4/i\", \"instance_tip\")\n self.vao.buffer(self.vbo_instance_color, \"4f4/i\", \"instance_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n def _upload_buffers(self):\n if not self.is_renderable or not self._need_upload:\n return\n self._need_upload = False\n\n lines = self.current_lines\n if self.mode == \"lines\":\n v0s = lines[::2]\n v1s = lines[1::2]\n else:\n v0s = lines[:-1]\n v1s = lines[1:]\n\n self.vbo_instance_base.write(v0s.astype(\"f4\").tobytes())\n self.vbo_instance_tip.write(v1s.astype(\"f4\").tobytes())\n\n if len(self._line_colors.shape) > 1:\n self.vbo_instance_color.write(self._line_colors.astype(\"f4\").tobytes())\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n\n prog = self.prog\n prog[\"r_base\"] = self.r_base\n prog[\"r_tip\"] = self.r_tip\n if len(self._line_colors.shape) == 1:\n prog[\"use_uniform_color\"] = True\n prog[\"uniform_color\"] = tuple(self.color)\n else:\n prog[\"use_uniform_color\"] = False\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n prog[\"clip_control\"].value = (0, 0, 0)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_lines)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n prog[\"r_base\"] = self.r_base\n prog[\"r_tip\"] = self.r_tip\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_lines)\n\n def get_mesh(self):\n v0s = np.array([[0, 0, 0]], np.float32)\n v1s = np.array([[0, 0, 1]], np.float32)\n\n # If r_tip is below a certain threshold, we create a proper cone, i.e. 
with just a single vertex at the top.\n if self.r_tip < 1e-5:\n data = _create_cone_from_to(v0s, v1s, radius=1.0)\n else:\n data = _create_cylinder_from_to(v0s, v1s, radius1=1.0, radius2=1.0)\n\n return data[\"vertices\"][0], data[\"faces\"]\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n\n def update_frames(self, lines, frames):\n self.lines[frames] = lines\n self.redraw()\n\n def add_frames(self, lines):\n if len(lines.shape) == 2:\n lines = lines[np.newaxis]\n self.lines = np.append(self.lines, lines, axis=0)\n\n def remove_frames(self, frames):\n self.lines = np.delete(self.lines, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n if self.mode == \"lines\":\n v0s = self.lines[:, ::2]\n v1s = self.lines[:, 1::2]\n else:\n v0s = self.lines[:, :-1]\n v1s = self.lines[:, 1:]\n\n print(self.lines.shape)\n print(v0s.shape)\n\n # Data is in the form of (F, N_LINES, 3), convert it to (F*N_LINES, 3)\n v0s = np.reshape(v0s, (-1, 3))\n v1s = np.reshape(v1s, (-1, 3))\n\n self.r_tip = self.r_base if self.r_tip is None else self.r_tip\n\n # If r_tip is below a certain threshold, we create a proper cone, i.e. with just a single vertex at the top.\n if self.r_tip < 10e-6:\n data = _create_cone_from_to(v0s, v1s, radius=self.r_base)\n else:\n data = _create_cylinder_from_to(v0s, v1s, radius1=self.r_base, radius2=self.r_tip)\n\n L = self.n_lines\n V = data[\"vertices\"].shape[1]\n\n vertices = data[\"vertices\"].reshape((self.n_frames, -1, 3))\n faces = data[\"faces\"]\n\n fs = faces[np.newaxis].repeat(L, 0).reshape((L, -1))\n offsets = (np.arange(L) * V).reshape((L, 1))\n faces = (fs + offsets).reshape((-1, 3))\n\n mesh = usd.add_mesh(stage, usd_path, self.name, vertices, faces, self.get_local_transform())\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Meshes", "path": "aitviewer/renderables/meshes.py", "snippet": "class Meshes(Node):\n \"\"\"A sequence of triangle meshes. This assumes that the mesh topology is fixed over the sequence.\"\"\"\n\n def __init__(\n self,\n vertices,\n faces,\n vertex_normals=None,\n face_normals=None,\n vertex_colors=None,\n face_colors=None,\n uv_coords=None,\n path_to_texture=None,\n cast_shadow=True,\n pickable=True,\n flat_shading=False,\n draw_edges=False,\n draw_outline=False,\n instance_transforms=None,\n icon=\"\\u008d\",\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param vertices: A np array of shape (N, V, 3) or (V, 3).\n :param faces: A np array of shape (F, 3).\n :param vertex_normals: A np array of shape (N, V, 3). If not provided, the vertex normals will be computed,\n which incurs some overhead.\n :param face_normals: A np array of shape (N, F, 3). 
If not provided, the face normals will be computed, which\n incurs some overhead.\n :param vertex_colors: A np array of shape (N, V, 4) overriding the uniform color.\n :param face_colors: A np array of shape (N, F, 4) overriding the uniform or vertex colors.\n :param uv_coords: A np array of shape (V, 2) if the mesh is to be textured.\n :param path_to_texture: Path to an image file that serves as the texture.\n :param cast_shadow: If True the mesh casts a shadow on other objects.\n :param pickable: If True the mesh can be selected with a mouse click.\n :param flat_shading: If True the each face of the mesh is shaded with a constant normal.\n :param draw_edges: If True the normals the edges of the mesh is drawn on top of the mesh.\n :param draw_outline: If true an outline is drawn around the mesh.\n :instance_transforms: np array of size (N, I, 4, 4) or (I, 4, 4) or None. If not None, 'I' instances of\n the same mesh will be rendered, each with its own transformation matrix.\n \"\"\"\n if len(vertices.shape) == 2 and vertices.shape[-1] == 3:\n vertices = vertices[np.newaxis]\n assert len(vertices.shape) == 3\n assert len(faces.shape) == 2\n n_frames = vertices.shape[0]\n\n # Instancing.\n if instance_transforms is not None:\n # Check shape of transforms.\n if len(instance_transforms.shape) == 3:\n instance_transforms = instance_transforms[np.newaxis]\n assert len(instance_transforms.shape) == 4\n\n # Number of instance frames must match number of frames or be 1.\n assert n_frames == 1 or instance_transforms.shape[0] == 1 or n_frames == instance_transforms.shape[0]\n n_frames = max(n_frames, instance_transforms.shape[0])\n\n self._instance_transforms = instance_transforms\n else:\n self._instance_transforms = None\n\n super(Meshes, self).__init__(n_frames=n_frames, icon=icon, **kwargs)\n\n self._vertices = vertices\n self._faces = faces.astype(np.int32)\n\n # Create these first because other setters can call redraw() which uses this fields.\n self._face_colors = None\n self._vertex_colors = None\n self._has_transparent_vertex_or_face_colors = False\n\n def _maybe_unsqueeze(x):\n return x[np.newaxis] if x is not None and x.ndim == 2 else x\n\n self._vertex_normals = _maybe_unsqueeze(vertex_normals)\n self._face_normals = _maybe_unsqueeze(face_normals)\n self.vertex_colors = _maybe_unsqueeze(vertex_colors)\n self.face_colors = _maybe_unsqueeze(face_colors)\n\n # Texture handling.\n self.has_texture = (uv_coords is not None) and (path_to_texture is not None)\n self.uv_coords = uv_coords\n self.texture_path = path_to_texture\n\n if self.has_texture:\n self.use_pickle_texture = path_to_texture.endswith((\".pickle\", \"pkl\"))\n if self.use_pickle_texture:\n self.texture_image = pickle.load(open(path_to_texture, \"rb\"))\n else:\n self.texture_image = Image.open(path_to_texture).transpose(method=Image.FLIP_TOP_BOTTOM).convert(\"RGB\")\n else:\n self.texture_image = None\n\n # Enable rendering passes\n self.cast_shadow = cast_shadow\n self.fragmap = pickable\n self.depth_prepass = True\n self.outline = True\n\n # Misc.\n self._flat_shading = flat_shading\n self.draw_edges = draw_edges\n self.draw_outline = draw_outline\n self.show_texture = self.has_texture\n self.norm_coloring = False\n self.normals_r = None\n self.need_upload = True\n self._use_uniform_color = self._vertex_colors is None and self._face_colors is None\n self._vertex_faces_sparse = trimesh.geometry.index_sparse(self._vertices.shape[1], self._faces)\n\n self.clip_control = np.array((0, 0, 0), np.int32)\n self.clip_value = 
np.array((0, 0, 0), np.float32)\n\n @classmethod\n def instanced(cls, *args, positions=None, rotations=None, scales=None, **kwargs):\n \"\"\"\n Creates and returns an instanced sequence of N frames and I instances.\n Each instance will have its own position, rotation and scale.\n :param positions: np array of size (N, I, 3) or (I, 3) or None.\n :param rotations: np array of size (N, I, 3, 3) or (I, 3, 3) or None.\n :param scales: np array of size (N, I) or (I) or None.\n\n *args, and **kwargs are forwarded to the Meshes constructor.\n \"\"\"\n assert positions is not None or rotations is not None or scales is not None\n\n n_instances = 0\n n_frames = 0\n\n def check_array(a, dim):\n nonlocal n_instances, n_frames\n if a is not None:\n if len(a.shape) == dim + 1:\n a = a[np.newaxis]\n n_frames = max(n_frames, a.shape[0])\n n_instances = max(n_instances, a.shape[1])\n return a\n\n positions = check_array(positions, 1)\n rotations = check_array(rotations, 2)\n scales = check_array(scales, 0)\n\n if positions is None:\n positions = np.zeros((n_frames, n_instances, 3))\n if rotations is None:\n rotations = np.zeros((n_frames, n_instances, 3, 3))\n rotations[:, :] = np.eye(3)\n if scales is None:\n scales = np.ones((n_frames, n_instances))\n\n transforms = np.zeros((n_frames, n_instances, 4, 4))\n transforms[:, :, :3, :3] = (rotations.reshape((-1, 9)) * scales.reshape((-1, 1))).reshape(\n (n_frames, n_instances, 3, 3)\n )\n transforms[:, :, :3, 3] = positions\n transforms[:, :, 3, 3] = 1.0\n return cls(*args, **kwargs, instance_transforms=transforms)\n\n @classmethod\n def from_file(cls, file, **kwargs):\n \"\"\"\n Loads a mesh from a file that can be loaded by trimesh (e.g. \".obj\", \".ply\", ...)\n See trimesh.available_formats() for a complete list.\n \"\"\"\n mesh = trimesh.load(file)\n\n uvs = None\n vertex_colors = None\n face_colors = None\n if isinstance(mesh.visual, trimesh.visual.ColorVisuals):\n if mesh.visual.kind == \"vertex_colors\":\n vertex_colors = mesh.visual.vertex_colors\n elif mesh.visual.kind == \"face_colors\":\n face_colors = mesh.visual.vertex_colors\n elif isinstance(mesh.visual, trimesh.visual.TextureVisuals):\n uvs = mesh.visual.uv\n\n return Meshes(\n mesh.vertices,\n mesh.faces,\n vertex_normals=mesh.vertex_normals,\n face_colors=face_colors,\n vertex_colors=vertex_colors,\n uv_coords=uvs,\n **kwargs,\n )\n\n @property\n def vertices(self):\n return self._vertices\n\n @vertices.setter\n def vertices(self, vertices):\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n\n # Update vertices and redraw\n self._vertices = vertices\n self.n_frames = len(vertices)\n\n # If vertex or face normals were supplied, they are no longer valid.\n self._vertex_normals = None\n self._face_normals = None\n\n # Must clear all LRU caches where the vertices are used.\n self.compute_vertex_and_face_normals.cache_clear()\n\n self.redraw()\n\n @property\n def faces(self):\n return self._faces\n\n @faces.setter\n def faces(self, f):\n self._faces = f.astype(np.int32)\n self._vertex_faces_sparse = trimesh.geometry.index_sparse(self.vertices.shape[1], self._faces)\n\n @property\n def current_vertices(self):\n idx = self.current_frame_id if self.vertices.shape[0] > 1 else 0\n return self.vertices[idx]\n\n @current_vertices.setter\n def current_vertices(self, vertices):\n idx = self.current_frame_id if self.vertices.shape[0] > 1 else 0\n self._vertices[idx] = vertices\n self.compute_vertex_and_face_normals.cache_clear()\n self.redraw()\n\n @property\n def 
current_transformed_vertices(self):\n return (self.current_vertices @ self.model_matrix[:3, :3].T) + self.model_matrix[:3, 3]\n\n @property\n def transformed_vertices(self):\n return (self.vertices @ self.model_matrix[:3, :3].T) + self.model_matrix[:3, 3]\n\n @property\n def n_faces(self):\n return self.faces.shape[0]\n\n @property\n def n_vertices(self):\n return self.vertices.shape[1]\n\n @property\n def vertex_faces(self):\n # To compute the normals we need to know a mapping from vertex ID to all faces that this vertex is part of.\n # Because we are lazy we abuse trimesh to compute this for us. Not all vertices have the maximum degree, so\n # this array is padded with -1 if necessary.\n return trimesh.Trimesh(self.vertices[0], self.faces, process=False).vertex_faces\n\n @property\n def vertex_normals(self):\n \"\"\"Get or compute all vertex normals (this might take a while for long sequences).\"\"\"\n if self._vertex_normals is None:\n vertex_normals, _ = compute_vertex_and_face_normals_sparse(\n self.vertices, self.faces, self._vertex_faces_sparse, normalize=True\n )\n self._vertex_normals = vertex_normals\n return self._vertex_normals\n\n @property\n def face_normals(self):\n \"\"\"Get or compute all face normals (this might take a while for long sequences).\"\"\"\n if self._face_normals is None:\n _, face_normals = compute_vertex_and_face_normals_sparse(\n self.vertices, self.faces, self._vertex_faces_sparse, normalize=True\n )\n self._face_normals = face_normals\n return self._face_normals\n\n def vertex_normals_at(self, frame_id):\n \"\"\"Get or compute the vertex normals at the given frame.\"\"\"\n if self._vertex_normals is None:\n vn, _ = self.compute_vertex_and_face_normals(frame_id, normalize=True)\n else:\n assert len(self._vertex_normals.shape) == 3, f\"Got shape {self._vertex_normals.shape}\"\n vn = self._vertex_normals[frame_id]\n return vn\n\n def face_normals_at(self, frame_id):\n \"\"\"Get or compute the face normals at the given frame.\"\"\"\n if self._face_normals is None:\n _, fn = self.compute_vertex_and_face_normals(frame_id, normalize=True)\n else:\n assert len(self._face_normals.shape) == 3, f\"Got shape {self._face_normals.shape}\"\n fn = self._face_normals[frame_id]\n return fn\n\n @property\n def vertex_colors(self):\n if self._vertex_colors is None:\n self._vertex_colors = np.full((self.n_frames, self.n_vertices, 4), self.material.color)\n return self._vertex_colors\n\n @vertex_colors.setter\n def vertex_colors(self, vertex_colors):\n # If vertex_colors are None, we resort to the material color.\n if vertex_colors is None:\n self._vertex_colors = None\n self._use_uniform_color = True\n elif isinstance(vertex_colors, tuple) and len(vertex_colors) == 4:\n self.vertex_colors = None\n self._use_uniform_color = True\n self.material.color = vertex_colors\n else:\n if len(vertex_colors.shape) == 2:\n assert vertex_colors.shape[0] == self.n_vertices\n vertex_colors = np.repeat(vertex_colors[np.newaxis], self.n_frames, axis=0)\n assert len(vertex_colors.shape) == 3\n self._vertex_colors = vertex_colors\n self._use_uniform_color = False\n self.redraw()\n\n @property\n def current_vertex_colors(self):\n if self._use_uniform_color:\n return np.full((self.n_vertices, 4), self.material.color)\n else:\n idx = self.current_frame_id if self.vertex_colors.shape[0] > 1 else 0\n return self.vertex_colors[idx]\n\n @property\n def face_colors(self):\n return self._face_colors\n\n @face_colors.setter\n def face_colors(self, face_colors):\n if face_colors is not None:\n if 
len(face_colors.shape) == 2:\n face_colors = face_colors[np.newaxis]\n self._face_colors = face_colors\n self._use_uniform_color = False\n else:\n self._face_colors = None\n self.redraw()\n\n @property\n def current_face_colors(self):\n if self._use_uniform_color:\n return np.full((self.n_faces, 4), self.material.color)\n else:\n idx = self.current_frame_id if self.face_colors.shape[0] > 1 else 0\n return self.face_colors[idx]\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n\n if self.face_colors is None:\n self.vertex_colors = color\n\n @property\n def flat_shading(self):\n return self._flat_shading\n\n @flat_shading.setter\n def flat_shading(self, flat_shading):\n if self._flat_shading != flat_shading:\n self._flat_shading = flat_shading\n self.redraw()\n\n def closest_vertex_in_triangle(self, tri_id, point):\n face_vertex_id = np.linalg.norm((self.current_vertices[self.faces[tri_id]] - point), axis=-1).argmin()\n return self.faces[tri_id][face_vertex_id]\n\n def get_bc_coords_from_points(self, tri_id, points):\n return points_to_barycentric(self.current_vertices[self.faces[[tri_id]]], points)[0]\n\n @lru_cache(2048)\n def compute_vertex_and_face_normals(self, frame_id, normalize=False):\n \"\"\"\n Compute face and vertex normals for the given frame. We use an LRU cache since this is a potentially\n expensive operation. This function exists because computing the normals on all frames can increase the\n startup time of the viewer considerably.\n\n :param frame_id: On which frame to compute the normals.\n :param normalize: Whether or not to normalize the normals. Not doing it is faster and the shaders typically\n enforce unit length of normals anyway.\n :return: The vertex and face normals as a np arrays of shape (V, 3) and (F, 3) respectively.\n \"\"\"\n vs = self.vertices[frame_id : frame_id + 1] if self.vertices.shape[0] > 1 else self.vertices\n vn, fn = compute_vertex_and_face_normals_sparse(vs, self.faces, self._vertex_faces_sparse, normalize)\n return vn.squeeze(0), fn.squeeze(0)\n\n @property\n def bounds(self):\n if self.instance_transforms is None:\n return self.get_bounds(self.vertices)\n else:\n # Get bounds in local coordinates\n bounds = self.get_local_bounds(self.vertices)\n\n # Transform bounds with instance transforms\n min = np.append(bounds[:, 0], 1.0)\n max = np.append(bounds[:, 1], 1.0)\n transforms = self.instance_transforms.reshape((-1, 4, 4))\n mins = transforms @ min\n maxs = transforms @ max\n\n # Return bounds in world coordinates\n return self.get_bounds(np.vstack((mins, maxs)))\n\n @property\n def current_bounds(self):\n if self.instance_transforms is None:\n return self.get_bounds(self.current_vertices)\n else:\n # Get bounds in local coordinates\n bounds = self.get_local_bounds(self.current_vertices)\n\n # Transform bounds with instance transforms\n min = np.append(bounds[:, 0], 1.0)\n max = np.append(bounds[:, 1], 1.0)\n transforms = self.current_instance_transforms.reshape((-1, 4, 4))\n mins = transforms @ min\n maxs = transforms @ max\n\n # Return bounds in world coordinates\n return self.get_bounds(np.vstack((mins[:, :3], maxs[:, :3])))\n\n def is_transparent(self):\n return self.color[3] < 1.0 or self._has_transparent_vertex_or_face_colors\n\n def on_frame_update(self):\n \"\"\"Called whenever a new frame must be displayed.\"\"\"\n super().on_frame_update()\n self.redraw()\n\n @property\n def current_instance_transforms(self):\n if self._instance_transforms is None:\n return None\n idx = self.current_frame_id if 
self._instance_transforms.shape[0] > 1 else 0\n return self._instance_transforms[idx]\n\n @property\n def instance_transforms(self):\n return self._instance_transforms\n\n @instance_transforms.setter\n def instance_transforms(self, instance_transforms):\n assert self._instance_transforms.shape == instance_transforms\n self._instance_transforms = instance_transforms\n\n @property\n def n_instances(self):\n if self._instance_transforms is None:\n return 1\n else:\n return self._instance_transforms.shape[1]\n\n def _upload_buffers(self):\n \"\"\"Upload the current frame data to the GPU for rendering.\"\"\"\n if not self.is_renderable or not self._need_upload:\n return\n\n self._need_upload = False\n\n # Write positions.\n self.vbo_vertices.write(self.current_vertices.astype(\"f4\").tobytes())\n\n # Write normals.\n if not self.flat_shading:\n vertex_normals = self.vertex_normals_at(self.current_frame_id)\n self.vbo_normals.write(vertex_normals.astype(\"f4\").tobytes())\n\n if self.face_colors is None:\n # Write vertex colors.\n self.vbo_colors.write(self.current_vertex_colors.astype(\"f4\").tobytes())\n else:\n # Write face colors.\n\n # Compute shape of 2D texture.\n shape = (min(self.faces.shape[0], 8192), (self.faces.shape[0] + 8191) // 8192)\n\n # Write texture left justifying the buffer to fill the last row of the texture.\n self.face_colors_texture.write(\n self.current_face_colors.astype(\"f4\").tobytes().ljust(shape[0] * shape[1] * 16)\n )\n\n # Write uvs.\n if self.has_texture:\n self.vbo_uvs.write(self.uv_coords.astype(\"f4\").tobytes())\n\n # Write instance transforms.\n if self.instance_transforms is not None:\n self.vbo_instance_transforms.write(\n np.transpose(self.current_instance_transforms.astype(\"f4\"), (0, 2, 1)).tobytes()\n )\n\n @hooked\n def redraw(self, **kwargs):\n self._need_upload = True\n\n transparent = False\n if self._vertex_colors is not None:\n transparent = transparent or np.any(self.vertex_colors[:, :, 3] < 1.0)\n if self._face_colors is not None:\n transparent = transparent or np.any(self.face_colors[:, :, 3] < 1.0)\n\n self._has_transparent_vertex_or_face_colors = transparent\n\n def _load_programs(self, vs, positions_vs):\n instanced = 1 if self.instance_transforms is not None else 0\n self.smooth_prog = get_smooth_lit_with_edges_program(vs, instanced)\n self.flat_prog = get_flat_lit_with_edges_program(vs, instanced)\n self.smooth_face_prog = get_smooth_lit_with_edges_face_color_program(vs, instanced)\n self.flat_face_prog = get_flat_lit_with_edges_face_color_program(vs, instanced)\n\n self.depth_only_program = get_depth_only_program(positions_vs, instanced)\n self.outline_program = get_outline_program(positions_vs, instanced)\n self.fragmap_program = get_fragmap_program(positions_vs, instanced)\n\n # noinspection PyAttributeOutsideInit\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n \"\"\"Prepares this object for rendering. 
This function must be called before `render` is used.\"\"\"\n vs = \"lit_with_edges.glsl\"\n positions_vs = \"mesh_positions.vs.glsl\"\n self._load_programs(vs, positions_vs)\n\n vertices = self.current_vertices\n vertex_normals = self.vertex_normals_at(self.current_frame_id)\n vertex_colors = self.current_vertex_colors\n\n self.vbo_vertices = ctx.buffer(vertices.astype(\"f4\").tobytes())\n self.vbo_normals = ctx.buffer(vertex_normals.astype(\"f4\").tobytes())\n self.vbo_colors = ctx.buffer(vertex_colors.astype(\"f4\").tobytes())\n self.vbo_indices = ctx.buffer(self.faces.tobytes())\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_normals, \"3f4\", \"in_normal\")\n self.vao.buffer(self.vbo_colors, \"4f4\", \"in_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n if self.instance_transforms is not None:\n self.vbo_instance_transforms = ctx.buffer(\n np.transpose(self.current_instance_transforms.astype(\"f4\"), (0, 2, 1)).tobytes()\n )\n self.vao.buffer(self.vbo_instance_transforms, \"16f4/i\", \"instance_transform\")\n\n # Compute shape of 2D texture.\n shape = (min(self.faces.shape[0], 8192), (self.faces.shape[0] + 8191) // 8192)\n self.face_colors_texture = ctx.texture(shape, 4, dtype=\"f4\")\n if self.face_colors is not None:\n # Write texture left justifying the buffer to fill the last row of the texture.\n self.face_colors_texture.write(\n self.current_face_colors.astype(\"f4\").tobytes().ljust(shape[0] * shape[1] * 16)\n )\n\n if self.has_texture:\n img = self.texture_image\n if self.use_pickle_texture:\n self.texture = ctx.texture(img.shape[:2], img.shape[2], img.tobytes())\n else:\n self.texture = ctx.texture(img.size, 3, img.tobytes())\n self.texture_prog = get_smooth_lit_texturized_program(vs)\n self.vbo_uvs = ctx.buffer(self.uv_coords.astype(\"f4\").tobytes())\n self.vao.buffer(self.vbo_uvs, \"2f4\", \"in_uv\")\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n if self.has_texture:\n self.texture.release()\n\n def _use_program(self, camera, **kwargs):\n if self.has_texture and self.show_texture:\n prog = self.texture_prog\n prog[\"diffuse_texture\"] = 0\n self.texture.use(0)\n else:\n if self.face_colors is None:\n if self.flat_shading:\n prog = self.flat_prog\n else:\n prog = self.smooth_prog\n else:\n if self.flat_shading:\n prog = self.flat_face_prog\n else:\n prog = self.smooth_face_prog\n self.face_colors_texture.use(0)\n prog[\"face_colors\"] = 0\n prog[\"norm_coloring\"].value = self.norm_coloring\n\n prog[\"use_uniform_color\"] = self._use_uniform_color\n prog[\"uniform_color\"] = self.material.color\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n\n prog[\"clip_control\"].value = tuple(self.clip_control)\n prog[\"clip_value\"].value = tuple(self.clip_value)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n return prog\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n prog = self._use_program(camera, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_instances)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n\n prog[\"clip_control\"].value = tuple(self.clip_control)\n prog[\"clip_value\"].value = tuple(self.clip_value)\n\n 
self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_instances)\n\n def _show_normals(self):\n \"\"\"Create and add normals at runtime\"\"\"\n vn = self.vertex_normals\n\n bounds = self.bounds\n diag = np.linalg.norm(bounds[:, 0] - bounds[:, 1])\n\n length = 0.005 * max(diag, 1) / self.scale\n vn = vn / np.linalg.norm(vn, axis=-1, keepdims=True) * length\n\n # Must import here because if we do it at the top we create a circular dependency.\n from aitviewer.renderables.arrows import Arrows\n\n positions = self.vertices\n self.normals_r = Arrows(\n positions,\n positions + vn,\n r_base=length / 10,\n r_head=2 * length / 10,\n p=0.25,\n name=\"Normals\",\n )\n self.normals_r.current_frame_id = self.current_frame_id\n self.add(self.normals_r)\n\n def gui(self, imgui):\n super(Meshes, self).gui(imgui)\n\n _, self.show_texture = imgui.checkbox(\n \"Render Texture##render_texture{}\".format(self.unique_name),\n self.show_texture,\n )\n _, self.norm_coloring = imgui.checkbox(\n \"Norm Coloring##norm_coloring{}\".format(self.unique_name),\n self.norm_coloring,\n )\n _, self.flat_shading = imgui.checkbox(\n \"Flat shading [F]##flat_shading{}\".format(self.unique_name),\n self.flat_shading,\n )\n _, self.draw_edges = imgui.checkbox(\"Draw edges [E]##draw_edges{}\".format(self.unique_name), self.draw_edges)\n _, self.draw_outline = imgui.checkbox(\n \"Draw outline##draw_outline{}\".format(self.unique_name), self.draw_outline\n )\n\n if self.normals_r is None:\n if imgui.button(\"Show Normals ##show_normals{}\".format(self.unique_name)):\n self._show_normals()\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.flat_shading = imgui.menu_item(\"Flat shading\", \"F\", selected=self.flat_shading, enabled=True)\n _, self.draw_edges = imgui.menu_item(\"Draw edges\", \"E\", selected=self.draw_edges, enabled=True)\n _, self.draw_outline = imgui.menu_item(\"Draw outline\", selected=self.draw_outline)\n\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n super().gui_context_menu(imgui, x, y)\n\n def gui_io(self, imgui):\n if imgui.button(\"Export OBJ##export_{}\".format(self.unique_name)):\n mesh = trimesh.Trimesh(vertices=self.current_vertices, faces=self.faces, process=False)\n mesh.export(\"../export/\" + self.name + \".obj\")\n\n def key_event(self, key, wnd_keys):\n if key == wnd_keys.F:\n self.flat_shading = not self.flat_shading\n elif key == wnd_keys.E:\n self.draw_edges = not self.draw_edges\n\n def update_frames(self, vertices, frames):\n self.vertices[frames] = vertices\n self.redraw()\n\n def add_frames(self, vertices):\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n self.vertices = np.append(self.vertices, vertices, axis=0)\n self.n_frames = max(self.n_frames, self.vertices.shape[0])\n\n def remove_frames(self, frames):\n self.vertices = np.delete(self.vertices, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n mesh = usd.add_mesh(stage, usd_path, self.name, self.vertices, self.faces, self.get_local_transform())\n if self.has_texture and not self.use_pickle_texture:\n # UVs.\n a_uv = UsdGeom.PrimvarsAPI(mesh).CreatePrimvar(\n \"st\", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.faceVarying\n )\n a_uv.Set(time=1, value=self.uv_coords[self.faces.flatten()])\n\n if not directory:\n texture_path = os.path.abspath(self.texture_path)\n else:\n texture_path = 
usd.copy_texture(self.texture_path, name, directory)\n usd.add_texture(stage, mesh, usd_path, texture_path)\n else:\n # NOTE: Per vertex and per face colors using usd displayColor are not currently\n # loaded by Blender. This code path can be enabled once support is there.\n if False:\n a_colors = mesh.GetDisplayColorAttr()\n if self._face_colors is not None:\n # Per face colors.\n if self._face_colors.shape[0] == 1:\n a_colors.Set(self._face_colors[0, :, :3].astype(np.float32))\n else:\n for i in range(self.n_frames):\n a_colors.Set(time=i + 1, value=self._face_colors[i, :, :3].astype(np.float32))\n elif self._vertex_colors is not None:\n # Per vertex colors.\n if self._vertex_colors.shape[0] == 1:\n a_colors.Set(self._vertex_colors[0, :, :3].astype(np.float32))\n else:\n for i in range(self.n_frames):\n a_colors.Set(time=i + 1, value=self._vertex_colors[i, :, :3].astype(np.float32))\n else:\n # Uniform color.\n a_colors.Set(np.array(self.color, np.float32)[:3])\n else:\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Node", "path": "aitviewer/scene/node.py", "snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. 
The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales == n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing frame in the sequence.\n self._enabled_frame_id = np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. 
Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n return self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n 
scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n self._current_frame_id = len(self) - 1\n else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if 
self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n self.update_transform(self.parent.model_matrix)\n\n def next_frame(self):\n self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n def previous_frame(self):\n self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n def on_before_frame_update(self):\n \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n previous frame.\"\"\"\n pass\n\n def on_frame_update(self):\n \"\"\"Called when the current frame is changed.\"\"\"\n pass\n\n def add(self, *nodes, **kwargs):\n self._add_nodes(*nodes, **kwargs)\n\n def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n \"\"\"\n Add a single node\n :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n :param expanded: Whether the node is initially expanded in the GUI.\n \"\"\"\n if n is None:\n return\n n._show_in_hierarchy = show_in_hierarchy\n n._expanded = expanded\n n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n self.nodes.append(n)\n n.parent = self\n n.update_transform(self.model_matrix)\n\n def _add_nodes(self, *nodes, **kwargs):\n \"\"\"Add multiple nodes\"\"\"\n for n in nodes:\n self._add_node(n, **kwargs)\n\n def remove(self, *nodes):\n for n in nodes:\n n.release()\n try:\n self.nodes.remove(n)\n except:\n pass\n\n @property\n def show_in_hierarchy(self):\n return self._show_in_hierarchy\n\n @property\n def enabled(self):\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n self._enabled = enabled\n\n @property\n def expanded(self):\n return self._expanded\n\n @expanded.setter\n def expanded(self, expanded):\n self._expanded = expanded\n\n def is_transparent(self):\n \"\"\"\n Returns true if the object is transparent and should thus be sorted when rendering.\n Subclassess that use a different color should implement this method to be rendered correctly when transparent.\n \"\"\"\n return self.material.color[3] < 1.0\n\n def gui(self, imgui):\n \"\"\"\n Render GUI for custom node properties and controls. 
Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n objects should implement this method with '@hooked' to avoid leaking resources.\n \"\"\"\n for n in self.nodes:\n n.release()\n\n def on_selection(self, node, instance_id, tri_id):\n \"\"\"\n Called when the node is selected\n\n :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n (can be None if the selection wasn't a mouse event)\n :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n (can be None if the selection wasn't a mouse event)\n \"\"\"\n pass\n\n def key_event(self, key, wnd_keys):\n \"\"\"\n Handle shortcut key presses (if you are the selected object)\n \"\"\"\n pass\n\n def update_frames(self, *args, **kwargs):\n pass\n\n def add_frames(self, *args, **kwargs):\n pass\n\n def remove_frames(self, *args, **kwargs):\n pass\n\n def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n if verbose:\n print(usd_path)\n for n in self.nodes:\n if n.export_usd_enabled:\n n.export_usd(stage, usd_path, directory, verbose)\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n \"\"\"\n Export the node into an USD file. Nodes that implement this method should use\n recursively call this for every children that should also be exported.\n\n :param stage: an object of type Usd.Stage into which to export the node\n :param usd_path: the path of the parent object in the USD file scene hierarchy.\n \"\"\"\n from pxr import Gf, UsdGeom\n\n usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n # Transform.\n xform = UsdGeom.Xform.Define(stage, usd_path)\n a_xform = xform.AddTransformOp()\n a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "hooked", "path": "aitviewer/utils/decorators.py", "snippet": "class hooked:\n def __init__(self, fn):\n self.fn = fn\n\n def __set_name__(self, owner, name):\n func = self.fn\n\n def _decorator(self, *args, **kwargs):\n super_obj = super(owner, self)\n super_fn = getattr(super_obj, func.__name__)\n super_fn(*args, **kwargs)\n return func(self, *args, **kwargs)\n\n setattr(owner, name, _decorator)\n\n def __call__(self):\n assert (\n False\n ), \"@hooked decorator object should never be called directly. This can happen if you apply this decorator to a function that is not a method.\"" } ]
import numpy as np from skimage import measure from aitviewer.renderables.bounding_boxes import BoundingBoxes from aitviewer.renderables.lines import Lines from aitviewer.renderables.meshes import Meshes from aitviewer.scene.node import Node from aitviewer.utils.decorators import hooked
19337
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box.
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box.
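For context on the `measure.marching_cubes` call used in the SDF snippet above, a minimal sketch that meshes the zero level set of a synthetic sphere SDF; the sphere volume here is illustrative and not part of the dataset record:

import numpy as np
from skimage import measure

# Signed distance to a sphere of radius 0.3 centered in a unit cube.
res = 64
grid = np.linspace(0.0, 1.0, res)
x, y, z = np.meshgrid(grid, grid, grid, indexing="ij")
volume = np.sqrt((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2) - 0.3

# Same spacing computation as the SDF renderable: map voxel indices to local units.
size = np.array((1.0, 1.0, 1.0), np.float32)
verts, faces, normals, _ = measure.marching_cubes(
    volume, 0.0, spacing=size / (np.array(volume.shape) - 1.0), step_size=1
)
print(verts.shape, faces.shape)  # (N, 3) vertices and (M, 3) triangle indices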
self.bounding_box = BoundingBoxes.from_min_max_diagonal(
0
2023-12-07 16:13:50+00:00
24k
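Each record above pairs `cropped_code` with a `next_line` completion target. As a rough sketch of how rows shaped like this could drive a next-line evaluation; the JSONL path and the `complete` callable are hypothetical stand-ins for a real model harness:

import json

def evaluate_next_line(jsonl_path, complete):
    # `complete` is a hypothetical function: prefix code in, predicted line out.
    hits = total = 0
    with open(jsonl_path) as f:
        for raw in f:
            row = json.loads(raw)
            prediction = complete(row["cropped_code"])
            hits += prediction.strip() == row["next_line"].strip()
            total += 1
    return hits / total if total else 0.0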
nexB/dejacode
product_portfolio/filters.py
[ { "identifier": "ComponentKeyword", "path": "component_catalog/models.py", "snippet": "class ComponentKeyword(DataspacedModel):\n label = models.CharField(\n max_length=50,\n blank=True,\n help_text=_(\n \"A short, descriptive label to categorize components and support searches, \"\n \"reports, filters, and facets.\"\n ),\n )\n\n description = models.TextField(\n blank=True,\n help_text=_(\"Additional remarks about the intention and purpose of a Keyword value.\"),\n )\n\n class Meta:\n ordering = [\"label\"]\n unique_together = ((\"dataspace\", \"label\"), (\"dataspace\", \"uuid\"))\n\n def __str__(self):\n return self.label" }, { "identifier": "PROGRAMMING_LANGUAGES", "path": "component_catalog/programming_languages.py", "snippet": "PROGRAMMING_LANGUAGES = (\n \"4th Dimension/4D\",\n \"ABAP\",\n \"ABC\",\n \"ActionScript\",\n \"Ada\",\n \"Agilent VEE\",\n \"Algol\",\n \"Alice\",\n \"Angelscript\",\n \"ANTLR\",\n \"Apex\",\n \"APL\",\n \"AppleScript\",\n \"Arc\",\n \"Arduino\",\n \"ASP\",\n \"AspectJ\",\n \"Assembly\",\n \"ATLAS\",\n \"Augeas\",\n \"AutoHotkey\",\n \"AutoIt\",\n \"AutoLISP\",\n \"Automator\",\n \"Avenue\",\n \"Awk\",\n \"Bash\",\n \"Batchfile\",\n \"Visual Basic\",\n \"bc\",\n \"BCPL\",\n \"BETA\",\n \"BlitzMax\",\n \"Boo\",\n \"Bourne Shell\",\n \"Bro\",\n \"C\",\n \"C Shell\",\n \"C#\",\n \"C++\",\n \"C++/CLI\",\n \"C-Omega\",\n \"Caml\",\n \"Ceylon\",\n \"CFML\",\n \"cg\",\n \"Ch\",\n \"Chapel\",\n \"CHILL\",\n \"CIL\",\n \"CL (OS/400)\",\n \"Clarion\",\n \"Clean\",\n \"Clipper\",\n \"Clojure\",\n \"CLU\",\n \"COBOL\",\n \"Cobra\",\n \"CoffeeScript\",\n \"ColdFusion\",\n \"COMAL\",\n \"Common Lisp\",\n \"Coq\",\n \"Crystal\",\n \"CSS\",\n \"cT\",\n \"Curl\",\n \"D\",\n \"Dart\",\n \"DCL\",\n \"DCPU-16 ASM\",\n \"Delphi/Object Pascal\",\n \"DiBOL\",\n \"Dylan\",\n \"E\",\n \"eC\",\n \"Ecl\",\n \"ECMAScript\",\n \"EGL\",\n \"Eiffel\",\n \"Elixir\",\n \"Elm\",\n \"Emacs Lisp\",\n \"Erlang\",\n \"Etoys\",\n \"Euler\",\n \"Euphoria\",\n \"EXEC\",\n \"Expect\",\n \"F#\",\n \"Factor\",\n \"Falcon\",\n \"Fancy\",\n \"Fantom\",\n \"Felix\",\n \"Fish\",\n \"Flex\",\n \"Forth\",\n \"Fortran\",\n \"Fortran 90\",\n \"Fortress\",\n \"FreePascal\",\n \"Frege\",\n \"GAS\",\n \"Gambas\",\n \"GDScript\",\n \"GLSL (OpenGL Shading Language)\",\n \"GNU Octave\",\n \"Go\",\n \"Google AppsScript\",\n \"Gosu\",\n \"Groovy\",\n \"Haskell\",\n \"Haxe\",\n \"Heron\",\n \"HTML\",\n \"HPL\",\n \"Hy\",\n \"HyperTalk\",\n \"Icon\",\n \"IDL\",\n \"Inform\",\n \"Informix-4GL\",\n \"INTERCAL\",\n \"Io\",\n \"Ioke\",\n \"J\",\n \"J#\",\n \"JADE\",\n \"Java\",\n \"Java FX Script\",\n \"JavaScript\",\n \"JScript\",\n \"JScript.NET\",\n \"Julia\",\n \"Kaya\",\n \"Korn Shell\",\n \"Kotlin\",\n \"LabVIEW\",\n \"Ladder Logic\",\n \"Lasso\",\n \"Limbo\",\n \"Lingo\",\n \"Lisp\",\n \"LiveScript\",\n \"Logo\",\n \"Logtalk\",\n \"LotusScript\",\n \"LPC\",\n \"Lua\",\n \"Lustre\",\n \"M4\",\n \"MAD\",\n \"Mako\",\n \"Magic\",\n \"Magik\",\n \"Malbolge\",\n \"MANTIS\",\n \"Maple\",\n \"Mathematica\",\n \"MATLAB\",\n \"Max/MSP\",\n \"MAXScript\",\n \"MEL\",\n \"Mercury\",\n \"Mirah\",\n \"Miva\",\n \"ML\",\n \"Monkey\",\n \"Modula\",\n \"Modula-2\",\n \"Modula-3\",\n \"MOO\",\n \"Moto\",\n \"MS-DOS Batch\",\n \"MUMPS\",\n \"MXML\",\n \"N/A\",\n \"NASM\",\n \"NATURAL\",\n \"Nemerle\",\n \"Nimrod\",\n \"NQC\",\n \"NSIS\",\n \"Nu\",\n \"NXT-G\",\n \"Oberon\",\n \"Object Rexx\",\n \"Objective-C\",\n \"Objective-C++\",\n \"Objective-J\",\n \"OCaml\",\n \"Occam\",\n \"Octave\",\n \"ooc\",\n \"Opa\",\n \"OpenCL\",\n 
\"OpenEdge ABL\",\n \"OPL\",\n \"Oz\",\n \"Paradox\",\n \"Parrot\",\n \"Pascal\",\n \"Perl\",\n \"PHP\",\n \"Pike\",\n \"PILOT\",\n \"PL/I\",\n \"PL/SQL\",\n \"Pliant\",\n \"PostScript\",\n \"POV-Ray\",\n \"PowerBasic\",\n \"PowerScript\",\n \"PowerShell\",\n \"Processing\",\n \"Progress 4GL\",\n \"Prolog\",\n \"Puppet\",\n \"Pure Data\",\n \"Python\",\n \"Q\",\n \"QBasic\",\n \"QML\",\n \"R\",\n \"Racket\",\n \"Ragel\",\n \"REALBasic\",\n \"Red\",\n \"REBOL\",\n \"Revolution\",\n \"REXX\",\n \"RPG (OS/400)\",\n \"Ruby\",\n \"Rust\",\n \"S\",\n \"SR\",\n \"S-PLUS\",\n \"SAS\",\n \"Sather\",\n \"Scala\",\n \"Scheme\",\n \"Scilab\",\n \"Scratch\",\n \"Scriptol\",\n \"sed\",\n \"Seed7\",\n \"Self\",\n \"Shell\",\n \"SIGNAL\",\n \"Simula\",\n \"Simulink\",\n \"Slate\",\n \"Smalltalk\",\n \"Smarty\",\n \"Snobol\",\n \"SPARK\",\n \"SPSS\",\n \"SQL\",\n \"SQR\",\n \"Squeak\",\n \"Squirrel\",\n \"Standard ML\",\n \"Suneido\",\n \"SuperCollider\",\n \"Swift\",\n \"SWIG\",\n \"TACL\",\n \"Tcl\",\n \"Tcsh\",\n \"Tex\",\n \"thinBasic\",\n \"TOM\",\n \"Transact-SQL\",\n \"Turing\",\n \"TypeScript\",\n \"Vala/Genie\",\n \"VBScript\",\n \"Velocity\",\n \"Verilog\",\n \"VHDL\",\n \"VimL\",\n \"Visual Basic .NET\",\n \"Visual FoxPro\",\n \"WebDNA\",\n \"Whitespace\",\n \"X10\",\n \"xBase\",\n \"XBase++\",\n \"XBasic\",\n \"Xen\",\n \"XML\",\n \"XPL\",\n \"XSLT\",\n \"XQuery\",\n \"yacc\",\n \"Yorick\",\n \"Z shell\",\n)" }, { "identifier": "BooleanChoiceFilter", "path": "dje/filters.py", "snippet": "class BooleanChoiceFilter(django_filters.ChoiceFilter):\n def __init__(self, *args, **kwargs):\n kwargs[\"empty_label\"] = kwargs.pop(\"empty_label\", \"All\")\n kwargs[\"choices\"] = kwargs.pop(\n \"choices\",\n (\n (\"yes\", _(\"Yes\")),\n (\"no\", _(\"No\")),\n ),\n )\n super().__init__(*args, **kwargs)\n\n def filter(self, qs, value):\n boolean_value = {\"yes\": True, \"no\": False}.get(value)\n if boolean_value is not None:\n return qs.filter(**{self.field_name: boolean_value}).distinct()\n return qs" }, { "identifier": "DataspacedFilterSet", "path": "dje/filters.py", "snippet": "class DataspacedFilterSet(FilterSetUtilsMixin, django_filters.FilterSet):\n related_only = []\n\n def __init__(self, *args, **kwargs):\n try:\n self.dataspace = kwargs.pop(\"dataspace\")\n except KeyError:\n raise AttributeError(\"A dataspace needs to be provided to this FilterSet.\")\n\n self.dynamic_qs = kwargs.pop(\"dynamic_qs\", True)\n self.parent_qs_cache = {}\n\n super().__init__(*args, **kwargs)\n\n for field_name, filter_ in self.filters.items():\n # Dataspace scoping for FKs on DataspaceRelated models.\n if hasattr(filter_, \"queryset\") and is_dataspace_related(filter_.queryset.model):\n filter_.queryset = filter_.queryset.scope(self.dataspace)\n\n if field_name in self.related_only:\n self.apply_related_only(field_name, filter_)\n\n usage_policy = self.filters.get(\"usage_policy\")\n if usage_policy:\n model_name = self._meta.model._meta.model_name\n usage_policy.queryset = usage_policy.queryset.filter(content_type__model=model_name)\n\n def apply_related_only(self, field_name, filter_):\n \"\"\"\n Limit the filter choices to the values used on the parent queryset.\n This logic emulate a facets logic.\n See also `django.contrib.admin.filters.RelatedOnlyFieldListFilter`.\n \"\"\"\n parent_qs = self.get_parent_qs_for_related_only(field_name)\n is_related_field = hasattr(filter_, \"queryset\")\n\n if is_related_field: # FK type fields\n filter_.queryset = filter_.queryset.distinct().filter(\n 
pk__in=parent_qs.values_list(f\"{field_name}__pk\", flat=True)\n            )\n        else: # Choices type fields\n            choices_qs = (\n                parent_qs.order_by(field_name).distinct().values_list(field_name, flat=True)\n            )\n            filter_.extra[\"choices\"] = [\n                choice for choice in filter_.extra[\"choices\"] if choice[0] in choices_qs\n            ]\n\n    def get_parent_qs_for_related_only(self, field_name):\n        \"\"\"\n        Return the parent QuerySet with active filters applied,\n        except for the given `filter_name`.\n        The model default manager is used in place of self.queryset\n        since it does not contain the annotations and select/prefetch_related\n        that are not needed for that dynamic filtering.\n        \"\"\"\n        parent_qs = self._meta.model._default_manager.scope(self.dataspace)\n\n        if not self.dynamic_qs:\n            return parent_qs\n\n        data = self.data.copy()\n\n        # `sort` is only used for ordering and does not apply here.\n        # Removing it from the queryset improves performance.\n        fields_to_remove = [\n            \"sort\",\n            field_name,\n        ]\n\n        for name in fields_to_remove:\n            data.pop(name, None)\n\n        if not data:\n            return parent_qs\n\n        cache_key = json.dumps(data, sort_keys=True)\n        cached_qs = self.parent_qs_cache.get(cache_key, None)\n        if cached_qs:\n            return cached_qs\n\n        filterset = self.__class__(\n            data=data,\n            dataspace=self.dataspace,\n            queryset=parent_qs,\n            dynamic_qs=False,\n        )\n        self.parent_qs_cache[cache_key] = filterset.qs\n        return filterset.qs" }, { "identifier": "DefaultOrderingFilter", "path": "dje/filters.py", "snippet": "class DefaultOrderingFilter(django_filters.OrderingFilter):\n    \"\"\"Add default ordering from model meta after the provided value.\"\"\"\n\n    def filter(self, qs, value):\n        qs = super().filter(qs, value)\n\n        ordering = qs.query.order_by\n        if not ordering:\n            return qs\n\n        # Add the default ordering from the model and override the order_by value\n        for field_name in self.model._meta.ordering:\n            if field_name not in ordering:\n                ordering += (field_name,)\n\n        return qs.order_by(*ordering)" }, { "identifier": "HasRelationFilter", "path": "dje/filters.py", "snippet": "class HasRelationFilter(django_filters.ChoiceFilter):\n    def __init__(self, *args, **kwargs):\n        kwargs[\"lookup_expr\"] = \"isnull\"\n        kwargs[\"empty_label\"] = \"Any\"\n        kwargs[\"choices\"] = (\n            (\"with\", _(\"With\")),\n            (\"without\", _(\"Without\")),\n        )\n        super().__init__(*args, **kwargs)\n\n    def filter(self, qs, value):\n        if value == \"with\":\n            return qs.filter(**{f\"{self.field_name}__{self.lookup_expr}\": False}).distinct()\n        elif value == \"without\":\n            return qs.filter(**{f\"{self.field_name}__{self.lookup_expr}\": True}).distinct()\n        return qs" }, { "identifier": "MatchOrderedSearchFilter", "path": "dje/filters.py", "snippet": "class MatchOrderedSearchFilter(SearchRankFilter):\n    \"\"\"\n    Start with a case-insensitive containment search on the `name` field,\n    ordering based on the match type using annotations.\n\n    If that simple search returns nothing, fall back to the SearchRankFilter\n    search; this allows a \"name version\" type string to return some results.\n\n    Postgres pattern matching docs available at:\n    https://www.postgresql.org/docs/10/static/functions-matching.html#POSIX-CONSTRAINT-ESCAPES-TABLE\n    \"\"\"\n\n    def __init__(self, match_order_fields, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.match_order_fields = match_order_fields\n\n    def get_match_order_lookups(self, lookup_type, value):\n        or_queries = [\n            models.Q(**{f\"{field}__{lookup_type}\": value}) for field in self.match_order_fields\n        ]\n        return reduce(operator.or_, 
or_queries)\n\n def filter(self, qs, value):\n if not value:\n return qs\n\n # \\y matches only at the beginning or end of a word\n regex_escaped_value = r\"\\y{}\\y\".format(database_re_escape(value))\n\n # All matching patterns are applied case-insensitive\n match_order = Case(\n # 1. Exact match\n When(self.get_match_order_lookups(\"iexact\", value), then=Value(1)),\n # 2. Contains word with boundaries\n When(self.get_match_order_lookups(\"iregex\", regex_escaped_value), then=Value(2)),\n # 3. Contains word\n default=Value(3), # default `icontains` clause in `.filter()`\n output_field=IntegerField(),\n )\n\n default_ordering = self.model._meta.ordering\n\n simple_search_qs = (\n qs.filter(self.get_match_order_lookups(\"icontains\", value))\n .annotate(match_order=match_order)\n .order_by(\"match_order\", *default_ordering)\n )\n\n if simple_search_qs.exists():\n if self.distinct:\n simple_search_qs = simple_search_qs.distinct()\n return simple_search_qs\n\n return super().filter(qs, value)" }, { "identifier": "SearchFilter", "path": "dje/filters.py", "snippet": "class SearchFilter(django_filters.CharFilter):\n def __init__(self, search_fields, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.search_fields = search_fields\n\n def filter(self, qs, value):\n lookup_type = \"icontains\"\n\n for bit in value.split():\n or_queries = [\n models.Q(**{f\"{field}__{lookup_type}\": bit}) for field in self.search_fields\n ]\n qs = qs.filter(reduce(operator.or_, or_queries))\n\n return qs" }, { "identifier": "BootstrapSelectMultipleWidget", "path": "dje/widgets.py", "snippet": "class BootstrapSelectMultipleWidget(BootstrapSelectMixin, widgets.SelectMultiple):\n pass" }, { "identifier": "DropDownWidget", "path": "dje/widgets.py", "snippet": "class DropDownWidget(LinkWidget):\n dropdown_template = \"\"\"\n <div class=\"dropdown btn-group\">\n <a class=\"btn btn-outline-secondary btn-xs dropdown-toggle {active}\" href=\"#\" role=\"button\"\n data-bs-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\"\n aria-label=\"{label} options dropdown\">\n </a>\n {menu}\n </div>\n \"\"\"\n\n def __init__(self, attrs=None, choices=(), anchor=None, right_align=False, label=None):\n self.anchor = anchor\n self.right_align = right_align\n self.label = label\n super().__init__(attrs, choices)\n\n def render(self, name, value, attrs=None, renderer=None, choices=()):\n css_class = \"dropdown-menu\"\n if self.right_align:\n css_class += \" dropdown-menu-end\"\n attrs = dict(attrs)\n attrs.update({\"class\": css_class})\n\n if not hasattr(self, \"data\"):\n self.data = {}\n if value is None:\n value = \"\"\n final_attrs = self.build_attrs(self.attrs, extra_attrs=attrs)\n output = [\"<div%s>\" % flatatt(final_attrs)]\n options = self.render_options(choices, [value], name)\n if options:\n output.append(options)\n output.append(\"</div>\")\n menu = format_html(\"\\n\".join(output))\n\n return self.dropdown_template.format(\n menu=menu,\n active=\"active\" if value else \"\",\n label=self.label if self.label else name.title(),\n )\n\n def render_option(self, name, selected_choices, option_value, option_label):\n option_value = str(option_value)\n if option_label == BLANK_CHOICE_DASH[0][1]:\n option_label = _(\"All\")\n\n data = self.data.copy()\n data[name] = option_value\n selected = data == self.data or option_value in selected_choices\n css_class = \"dropdown-item\"\n if selected:\n css_class += \" active\"\n\n try:\n url = data.urlencode()\n except AttributeError:\n # doseq is required for proper 
encoding,\n url = urlencode(data, doseq=True)\n\n return self.option_string().format(\n css_class=css_class,\n query_string=url,\n label=str(option_label),\n anchor=self.anchor or \"\",\n )\n\n def option_string(self):\n return '<a href=\"?{query_string}{anchor}\" class=\"{css_class}\">{label}</a>'" }, { "identifier": "License", "path": "license_library/models.py", "snippet": "class License(\n LicenseSymbolMixin,\n ReferenceNotesMixin,\n UsagePolicyMixin,\n ExternalReferenceMixin,\n HistoryFieldsMixin,\n RequestMixin,\n DataspacedModel,\n):\n owner = models.ForeignKey(\n to=\"organization.Owner\",\n on_delete=models.PROTECT,\n help_text=_(\n \"An owner is an entity that is the original author or custodian of one or \"\n \"more software licenses, and which is responsible for the text of that license.\"\n ),\n )\n\n key = models.CharField(\n db_index=True,\n max_length=50,\n help_text=_(\"Unique key name of the license.\"),\n validators=[validate_slug_plus],\n )\n\n name = models.CharField(\n db_index=True,\n max_length=100,\n help_text=_(\"The full name of the license, as provided by the original authors.\"),\n )\n\n short_name = models.CharField(\n db_index=True,\n max_length=50,\n verbose_name=_(\"Short Name\"),\n help_text=_(\"Most commonly used name for the license, often abbreviated.\"),\n )\n\n keywords = models.CharField(\n db_index=True,\n max_length=500,\n blank=True,\n help_text=_(\n \"Keywords to associate with a license to ensure that the license will be \"\n \"found when a user searches on one or more of the keywords. Examples include \"\n \"alternative names for the license, or file/product names that are commonly \"\n \"associated with the license.\"\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"Homepage URL for the license.\"),\n )\n\n full_text = NoStripTextField(\n blank=True,\n help_text=_(\n \"The full text of the license. Note that actual usage of a license with \"\n \"software may include copyright statements and owner information.\"\n ),\n )\n\n standard_notice = NoStripTextField(\n blank=True,\n help_text=_(\"The standard notice text for this license if it exists.\"),\n )\n\n text_urls = models.TextField(\n _(\"Text URLs\"),\n blank=True,\n help_text=_(\n \"URLs to the text of the license (plain text or HTML) on the main site of \"\n \"this license.\"\n ),\n )\n\n faq_url = models.URLField(\n _(\"FAQ URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL of a page with Frequently Asked Questions about this license.\"),\n )\n\n osi_url = models.URLField(\n _(\"OSI URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL on the OSI website http://opensource.org for OSI-approved licenses.\"),\n )\n\n other_urls = models.TextField(\n _(\"Other URLs\"),\n blank=True,\n help_text=_(\n \"Other URLs that identify this license, such as URLs to this license in \"\n \"different open-source projects. Obsolete links may be kept here, as they \"\n \"may be useful for historical analysis purpose.\"\n ),\n )\n\n reviewed = models.BooleanField(\n default=False,\n help_text=_(\n \"True / False (yes/no) - regarding whether a system license definition has \"\n \"been reviewed by an administrator. 
Defaults to False.\"\n ),\n )\n\n publication_year = models.CharField(\n max_length=4,\n blank=True,\n help_text=_(\"Year this license was first published, in four-digits format.\"),\n )\n\n spdx_license_key = models.CharField(\n _(\"SPDX short identifier\"),\n db_index=True,\n blank=True,\n max_length=50,\n validators=[validate_spdx_license_key],\n help_text=_(\n \"Short identifier of the license as stated on each license detail page at \"\n \"https://spdx.org/licenses/ or a LicenseRef value that points to another \"\n \"license list.\"\n ),\n )\n\n category = models.ForeignKey(\n to=\"license_library.LicenseCategory\",\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n help_text=_(\n \"A license category, identified by a code, provides a major grouping for \"\n \"licenses, generally describing the relationship between the licensor and \"\n \"licensee.\"\n ),\n )\n\n license_style = models.ForeignKey(\n to=\"license_library.LicenseStyle\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n help_text=_(\n \"A license style identifies a group of miscellaneous characteristics about a \"\n \"license, which may include a combination of restrictions about software \"\n \"modification and usage\"\n ),\n )\n\n license_profile = models.ForeignKey(\n to=\"license_library.LicenseProfile\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n verbose_name=_(\"License profile\"),\n help_text=format_lazy(\n \"{verbose_name}: a selection of license tags and their values, identified by a \"\n \"numeric code, in order to provide a convenient way to assign a set of tag values to \"\n \"a license. \"\n 'A \"Tag\" identifies a frequently encountered obligation, restriction, or other '\n \"notable characteristic of license terms. \"\n \"Note that individual tag value assignments may vary by license.\",\n verbose_name=_(\"License profile\"),\n ),\n )\n\n license_status = models.ForeignKey(\n to=\"license_library.LicenseStatus\",\n verbose_name=_(\"configuration status\"),\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n help_text=_(\n \"An organization can use the license status to communicate the current stage \"\n \"of the license configuration review process.\"\n ),\n )\n\n is_active = models.BooleanField(\n verbose_name=_(\"Is active\"),\n null=True,\n db_index=True,\n help_text=_(\n \"When set to True (Yes), this field indicates that a license definition in the \"\n \"library is currently in use (active). When set to False (No), this field indicates \"\n \"that a license is deprecated (inactive) and should not be used, and the license \"\n \"will not appear in the user views. When the field value is Unknown, the license \"\n \"will not appear in the user views, usually suggesting that the license has not \"\n \"yet been evaluated.\"\n ),\n )\n\n curation_level = models.PositiveSmallIntegerField(\n db_index=True,\n default=0,\n validators=[validators.MaxValueValidator(100)],\n help_text=_(\n \"A numeric value, from 0 to 100, that indicates the level of completeness of all the \"\n \"pertinent license data, as well as the state of that data being reviewed by a senior \"\n 'administrator. General Guidelines: \"10\" indicates basic data present. \"20\" indicates '\n 'Category and License Style assigned. \"30\" indicates all Obligation Tags are set. '\n '\"40\" indicates all License Tags are set. \"50\" indicates all previous conditions '\n \"plus URL fields set. 
Anything above that is at the discretion of a senior \"\n \"administrative reviewer.\"\n ),\n )\n\n admin_notes = models.TextField(\n blank=True,\n help_text=_(\n \"Internal notes for administrative use only, primarily intended to \"\n \"communicate special considerations about the interpretation of a license.\"\n ),\n )\n\n guidance = models.TextField(\n blank=True,\n help_text=format_lazy(\n \"Guidance notes maintained by an administrator to be communicated to the users who \"\n \"view the {license_app}, primarily intended to provide cautionary and/or policy \"\n \"information.\",\n license_app=_(\"License Library\"),\n ),\n )\n\n special_obligations = models.TextField(\n blank=True,\n help_text=format_lazy(\n \"A concise description, maintained by an administrator, of the obligations \"\n \"(or restrictions) mandated by the license which are not communicated by the \"\n \"standard tag assignments of {license_profile} associated with this License.\",\n license_profile=_(\"License profile\"),\n ),\n )\n\n tags = models.ManyToManyField(\n to=\"license_library.LicenseTag\",\n through=\"LicenseAssignedTag\",\n )\n\n is_component_license = models.BooleanField(\n default=False,\n db_index=True,\n help_text=_(\n \"When set to Yes, indicates that this license is assigned by a \"\n \"component-creator to one or more versions of a component, and is not \"\n \"generally used by other components.\"\n ),\n )\n\n is_exception = models.BooleanField(\n default=False,\n db_index=True,\n help_text=_(\n \"When set to Yes, indicates that this license is actually an \"\n \"exception applied to another license in order to modify \"\n \"specific conditions of that other license.\"\n ),\n )\n\n guidance_url = models.CharField(\n _(\"Guidance URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A URL to a page that documents your organization's policies and procedures \"\n \"that relate to the obligations and restrictions associated with this \"\n \"license or with similar licenses.\"\n ),\n )\n\n popularity = models.PositiveSmallIntegerField(\n db_index=True,\n default=0,\n help_text=_(\n \"A numeric value assigned to a license and maintained by a DejaCode \"\n \"administrator, that indicates the relative popularity of a license as used by \"\n \"public software projects. The value influences the default license ordering \"\n \"of the User License List, as well as the ordering of the suggested licenses \"\n \"presented as a dropdown list when you enter text in a DejaCode license \"\n \"expression field. 
Popularity values are originally provided in DejaCode \"\n \"Reference Data, but your administrator has the option to modify them for your \"\n \"dataspace.\"\n ),\n )\n\n language = models.CharField(\n max_length=10,\n choices=license_library_app.languages,\n blank=True,\n help_text=_(\"The language for this license, stored in standard language ID format.\"),\n )\n\n objects = DataspacedManager.from_queryset(LicenseQuerySet)()\n\n class Meta:\n # This is a special case for the unique_together, ie several entries\n # It's important that's the first entry is 'key' in this case as it is\n # used to Match a License inside a dataspace\n unique_together = (\n (\"dataspace\", \"key\"),\n (\"dataspace\", \"name\"),\n (\"dataspace\", \"short_name\"),\n (\"dataspace\", \"uuid\"),\n )\n ordering = [\"-popularity\", \"name\"]\n permissions = (\n (\"change_usage_policy_on_license\", \"Can change the usage_policy of license\"),\n )\n\n def __str__(self):\n return f\"{self.short_name} ({self.key})\"\n\n def clean(self, from_api=False):\n if self.is_active is False and self.spdx_license_key:\n raise ValidationError(\"A deprecated license must not have an SPDX license key.\")\n super().clean(from_api)\n\n def _get_unique_checks(self, exclude=None):\n \"\"\"\n Ensure SPDX license key are unique within a Dataspace.\n This is a soft-constraint, ie not enforced at the database level.\n The check on `spdx_license_key` is not included if the value is blank.\n \"\"\"\n unique_checks, date_checks = super()._get_unique_checks(exclude)\n\n if self.spdx_license_key:\n unique_together = (\"dataspace\", \"spdx_license_key\")\n unique_checks.append((self.__class__, unique_together))\n\n return unique_checks, date_checks\n\n @property\n def urn(self):\n return urn.build(\"license\", key=self.key)\n\n def get_url(self, name, params=None):\n if not params:\n params = [self.dataspace.name, self.key]\n return super().get_url(name, params)\n\n def get_absolute_url(self):\n return self.get_url(\"details\")\n\n @property\n def details_url(self):\n return self.get_absolute_url()\n\n def get_delete_url(self):\n return self.get_url(\"delete\")\n\n def get_download_text_url(self):\n return self.get_url(\"download_text\")\n\n def get_details_url_for_expression(self):\n return self.get_absolute_link(field_name=\"key\", title=self.short_name)\n\n @property\n def permission_protected_fields(self):\n return {\"usage_policy\": \"change_usage_policy_on_license\"}\n\n @property\n def case_insensitive_unique_on(self):\n return [\"name\", \"short_name\", \"key\"]\n\n def where_used(self, user):\n \"\"\"Callable for the reporting system.\"\"\"\n return (\n f\"Product {self.product_set.get_related_secured_queryset(user).count()}\\n\"\n f\"Component {self.component_set.count()}\\n\"\n f\"Subcomponent {self.subcomponent_set.count()}\\n\"\n f\"Package {self.package_set.count()}\\n\"\n f\"ProductComponent {self.productcomponent_set.count()}\\n\"\n f\"ProductPackage {self.productpackage_set.count()}\"\n )\n\n def get_license_tab_displayed_tags(self):\n \"\"\"\n Return a list of the assigned tags for the given License limiting\n the tags where the value is set to True.\n Tags that are not in a LicenseTagGroup are not included.\n\n Use `LicenseAssignedTag.prefetch_for_license_tab()` in prefect_related of the QuerySet.\n \"\"\"\n assigned_tag_qs = self.licenseassignedtag_set.filter(\n license_tag__licensetaggroupassignedtag__isnull=False\n ).order_by(\"license_tag__licensetaggroupassignedtag\")\n\n return [\n (assigned_tag.license_tag.label, 
assigned_tag.value, assigned_tag.license_tag.text)\n            for assigned_tag in assigned_tag_qs\n            # equivalent to \"filter(value=True)\" without triggering another Query\n            if assigned_tag.value\n        ]\n\n    def get_tagset(self, include_unknown=False, include_no_group=False):\n        \"\"\"\n        Return a tagset for the given License.\n        A \"tagset\" is the collection of all the LicenseTags assigned to a\n        License, grouped by LicenseTagGroup and ordered by the Sequence.\n        Groups are ordered by their sequence and tags are also ordered by\n        their sequence inside a Group.\n        LicenseAssignedTags with an \"Unknown\" value can be included using the\n        include_unknown parameter.\n        Tags not assigned to a LicenseTagGroup can be included using the\n        include_no_group parameter; an extra Group \"(No Group)\" will be added.\n        The \"tagset\" format is:\n        OrderedDict(\n            [('GroupName', [\n                ('TagName', 'AssignedTagValue', 'TagText', Annotations),]\n            )]\n        )\n        \"\"\"\n        filters = {\"license\": self}\n        if not include_unknown:\n            filters[\"value__isnull\"] = False\n\n        license_assigned_tags = (\n            LicenseAssignedTag.objects.scope(self.dataspace)\n            .filter(**filters)\n            .select_related(\"license_tag\")\n            .prefetch_related(\"licenseannotation_set\")\n        )\n\n        # Building a dictionary with the assigned tags of the current License\n        license_tags_dict = {\n            t.license_tag.label: (t.value, t.license_tag.text, t.licenseannotation_set.all())\n            for t in license_assigned_tags\n        }\n\n        # Creating a 'tagset' dictionary ordered by Group and Tag sequence\n        ordered_assigned_tags = (\n            LicenseTagGroupAssignedTag.objects.scope(self.dataspace)\n            .order_by(\"license_tag_group__seq\", \"seq\")\n            .select_related(\"license_tag_group\", \"license_tag\")\n        )\n\n        # Using an OrderedDict to keep the QS ordering as we build the results\n        license_tagset = OrderedDict()\n        for assigned_tag in ordered_assigned_tags:\n            label = assigned_tag.license_tag.label\n            if label in license_tags_dict:\n                # Using pop() to remove the entry from the dict, so we keep a\n                # list of tags that are not assigned into a LicenseTagGroup\n                value, text, annotations = license_tags_dict.pop(label)\n                group_name = assigned_tag.license_tag_group.name\n                license_tagset.setdefault(group_name, []).append([label, value, text, annotations])\n\n        # If there are still entries in license_tags_dict, that means those tags\n        # are not assigned into a LicenseTagGroup; we add those to the\n        # result if include_no_group is True\n        if include_no_group and license_tags_dict:\n            leftover_tags = [[label] + list(values) for label, values in license_tags_dict.items()]\n            license_tagset.update({\"(No Group)\": leftover_tags})\n\n        return license_tagset\n\n    def get_tag_labels(self):\n        \"\"\"Return the labels of all the tags associated with this license.\"\"\"\n        return self.tags.values_list(\"label\", flat=True)\n\n    def get_tag_value_from_label(self, label):\n        try:\n            assigned_tag = LicenseAssignedTag.objects.get(license=self, license_tag__label=label)\n        except (ObjectDoesNotExist, MultipleObjectsReturned):\n            return \"\" # Empty string rather than Error when no value available\n        return str(assigned_tag.value)\n\n    def set_assigned_tags_from_license_profile(self):\n        \"\"\"Update or create missing LicenseAssignedTags from the license_profile.\"\"\"\n        if not self.license_profile:\n            return\n\n        for profile_assigned_tag in self.license_profile.licenseprofileassignedtag_set.all():\n            LicenseAssignedTag.objects.update_or_create(\n                license=self,\n                license_tag=profile_assigned_tag.license_tag,\n                dataspace=self.dataspace,\n                defaults={\"value\": 
profile_assigned_tag.value},\n )\n\n @staticmethod\n def get_extra_relational_fields():\n return [\"annotations\", \"external_references\"]\n\n @property\n def scancode_url(self):\n return SCANCODE_LICENSE_URL.format(self.key)\n\n @property\n def licensedb_url(self):\n return SCANCODE_LICENSEDB_URL.format(self.key)\n\n @property\n def spdx_url(self):\n \"\"\"\n Return a URL to the https://spdx.org/licenses/ list using the short identifier.\n Return None for SPDX license key starting with \"LicenseRef-\" as those are not\n available in the SPDX list.\n \"\"\"\n if self.spdx_license_key and not self.spdx_license_key.startswith(\"LicenseRef-\"):\n return SPDX_LICENSE_URL.format(self.spdx_license_key)\n\n @property\n def spdx_link(self):\n \"\"\"\n Return a link base on the `spdx_url` value.\n Return the `spdx_license_key` when the URL is not available.\n \"\"\"\n spdx_url = self.spdx_url\n if spdx_url:\n return self.get_html_link(self.spdx_url, value=self.spdx_license_key, target=\"_blank\")\n return self.spdx_license_key\n\n @property\n def spdx_id(self):\n \"\"\"\n Return the `spdx_license_key` when available or a crafted LicenseRef using\n the license key.\n \"\"\"\n return self.spdx_license_key or f\"LicenseRef-dejacode-{self.key}\"\n\n def as_spdx(self):\n \"\"\"Return this License as an SPDX ExtractedLicensingInfo entry.\"\"\"\n return spdx.ExtractedLicensingInfo(\n license_id=self.spdx_id,\n extracted_text=self.full_text,\n name=self.name,\n see_alsos=self.get_all_urls(),\n )\n\n def get_all_urls(self):\n \"\"\"Return all URLs set in URL-based fields of this License instance.\"\"\"\n url_fields = [\n \"licensedb_url\",\n \"scancode_url\",\n \"homepage_url\",\n \"osi_url\",\n \"faq_url\",\n \"text_urls\",\n \"other_urls\",\n ]\n\n urls = []\n for url_field in url_fields:\n url_value = getattr(self, url_field)\n if url_value:\n urls.extend([url for url in url_value.split() if url])\n\n return sorted(set(urls))\n\n def has_tag_field_enabled(self, tag_field):\n # Make sure to include the following prefetch on the QuerySet:\n # prefetch_related('licenseassignedtag_set__license_tag')\n for assigned_tag in self.licenseassignedtag_set.all():\n if getattr(assigned_tag.license_tag, tag_field) and assigned_tag.value:\n return True\n return False\n\n @property\n def attribution_required(self):\n return self.has_tag_field_enabled(\"attribution_required\")\n\n @property\n def redistribution_required(self):\n return self.has_tag_field_enabled(\"redistribution_required\")\n\n @property\n def change_tracking_required(self):\n return self.has_tag_field_enabled(\"change_tracking_required\")\n\n @property\n def language_code(self):\n return self.language" }, { "identifier": "CodebaseResource", "path": "product_portfolio/models.py", "snippet": "class CodebaseResource(\n HistoryFieldsMixin,\n DataspacedModel,\n):\n product = models.ForeignKey(\n to=\"product_portfolio.Product\",\n on_delete=models.CASCADE,\n related_name=\"%(class)ss\",\n # Bypass the validation in ForeignKey.validate()\n # Required since we do not have control over the QuerySet in that method.\n parent_link=True,\n )\n\n path = models.CharField(\n max_length=2000,\n help_text=_(\n \"The full path value of a codebase resource (file or directory) in either the \"\n \"development or deployment codebase of a product.\"\n ),\n )\n\n is_deployment_path = models.BooleanField(\n default=False,\n help_text=_(\n \"When set to Yes, indicates that this codebase resource identifies a path in the \"\n \"Deployment codebase. 
When set to No (the default value), indicates that this \"\n \"codebase resource identifies a path in the Development codebase.\"\n ),\n )\n\n product_component = models.ForeignKey(\n to=\"product_portfolio.ProductComponent\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name=\"%(class)ss\",\n )\n\n product_package = models.ForeignKey(\n to=\"product_portfolio.ProductPackage\",\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name=\"%(class)ss\",\n )\n\n additional_details = models.JSONField(\n blank=True,\n default=dict,\n help_text=_(\n \"An optional JSON-formatted field to identify additional codebase resource attributes \"\n \"such as name, type, sha1, size, etc.\"\n ),\n )\n\n admin_notes = models.TextField(\n blank=True,\n help_text=_(\n \"Comments about the product codebase resource, provided by administrators, \"\n \"intended for viewing and maintenance by administrators only.\",\n ),\n )\n\n deployed_to = models.ManyToManyField(\n to=\"product_portfolio.CodebaseResource\",\n through=\"product_portfolio.CodebaseResourceUsage\",\n )\n\n objects = DataspacedManager.from_queryset(CodebaseResourceQuerySet)()\n\n class Meta:\n verbose_name = _(\"codebase resource\")\n unique_together = (\n (\"product\", \"path\"),\n (\"dataspace\", \"uuid\"),\n )\n ordering = (\"product\", \"path\")\n\n def __str__(self):\n return self.path\n\n def clean(self, from_api=False):\n if self.product_component_id and self.product_component.product_id != self.product_id:\n raise ValidationError(f\"{self.product_component} is not available on {self.product}.\")\n\n if self.product_package_id and self.product_package.product_id != self.product_id:\n raise ValidationError(f\"{self.product_package} is not available on {self.product}.\")\n\n super().clean(from_api)\n\n @property\n def deployed_from_paths(self):\n return [resource.deployed_from.path for resource in self.related_deployed_from.all()]\n\n @property\n def deployed_to_paths(self):\n return [resource.deployed_to.path for resource in self.related_deployed_to.all()]" }, { "identifier": "Product", "path": "product_portfolio/models.py", "snippet": "class Product(BaseProductMixin, FieldChangesMixin, KeywordsMixin, DataspacedModel):\n is_active = models.BooleanField(\n verbose_name=_(\"active\"),\n default=True,\n db_index=True,\n help_text=_(\n \"When set to Yes, this field indicates that a product definition is currently \"\n \"in use (active). When set to No, this field indicates that a product is deprecated \"\n \"(inactive), is no longer used, and the product will not appear in the user views. 
\"\n \"Note that this indicator applies only to a specific product version.\"\n ),\n )\n\n configuration_status = models.ForeignKey(\n to=\"product_portfolio.ProductStatus\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n help_text=CONFIGURATION_STATUS_HELP,\n )\n\n contact = models.CharField(\n max_length=255,\n blank=True,\n help_text=_(\n \"Identifies the person in your organization responsible for the development and \"\n \"release of the Product.\"\n ),\n )\n\n licenses = models.ManyToManyField(\n to=\"license_library.License\",\n through=\"ProductAssignedLicense\",\n )\n\n components = models.ManyToManyField(\n to=\"component_catalog.Component\",\n through=\"ProductComponent\",\n )\n\n packages = models.ManyToManyField(\n to=\"component_catalog.Package\",\n through=\"ProductPackage\",\n )\n\n objects = ProductSecuredManager()\n\n # WARNING: Bypass the security system implemented in ProductSecuredManager.\n # This is to be used only in a few cases where the User scoping is not appropriated.\n # For example: `self.dataspace.product_set(manager='unsecured_objects').count()`\n unsecured_objects = DataspacedManager()\n\n class Meta(BaseProductMixin.Meta):\n permissions = ((\"view_product\", \"Can view product\"),)\n # Defaults to ('add', 'change', 'delete', 'view')\n # Removed 'view' to avoid conflict with pre-django 2.1 custom `view_product`.\n default_permissions = (\"add\", \"change\", \"delete\")\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n if self.has_changed(\"configuration_status_id\"):\n self.actions_on_status_change()\n\n def get_attribution_url(self):\n return self.get_url(\"attribution\")\n\n def get_scan_all_packages_url(self):\n return self.get_url(\"scan_all_packages\")\n\n def get_import_from_scan_url(self):\n return self.get_url(\"import_from_scan\")\n\n def get_add_customcomponent_ajax_url(self):\n return self.get_url(\"add_customcomponent_ajax\")\n\n def get_manage_components_url(self):\n return self.get_url(\"manage_components\")\n\n def get_manage_packages_url(self):\n return self.get_url(\"manage_packages\")\n\n def get_license_summary_url(self):\n return self.get_url(\"license_summary\")\n\n def get_check_package_version_url(self):\n return self.get_url(\"check_package_version\")\n\n def get_import_manifest_url(self):\n return self.get_url(\"import_manifest\")\n\n def get_pull_project_data_url(self):\n return self.get_url(\"pull_project_data\")\n\n def can_be_changed_by(self, user):\n perms = guardian.shortcuts.get_perms(user, self)\n has_change_permission_on_product = \"change_product\" in perms\n\n return all(\n [\n user.has_perm(\"product_portfolio.change_product\"),\n has_change_permission_on_product,\n ]\n )\n\n def actions_on_status_change(self):\n \"\"\"Call post `save()`, when this instance `configuration_status` changes.\"\"\"\n if not self.configuration_status:\n return\n\n request_template = self.configuration_status.request_to_generate\n if request_template and self.last_modified_by:\n request_template.create_request(\n title=f\"Review Product {self} in {self.configuration_status} status\",\n requester=self.last_modified_by,\n object_id=self.id,\n )\n\n @cached_property\n def all_packages(self):\n return Package.objects.filter(\n models.Q(id__in=self.packages.all()) | models.Q(component__in=self.components.all())\n ).distinct()\n\n def get_merged_descendant_ids(self):\n \"\"\"\n Return a list of Component ids collected on the Product descendants:\n including ProductComponent and Subcomponent.\n \"\"\"\n 
productcomponents = self.productcomponents.catalogs()\n ids = []\n for pc in productcomponents:\n ids.append(pc.component.id)\n ids.extend(pc.component.get_descendant_ids())\n return list(set(ids))\n\n @staticmethod\n def get_feature_values(queryset):\n return (\n queryset.exclude(feature=\"\")\n .order_by(\"feature\")\n .distinct()\n .values_list(\"feature\", flat=True)\n )\n\n def get_feature_datalist(self):\n unique_features = set(self.get_feature_values(self.productcomponents))\n unique_features.update(self.get_feature_values(self.productpackages))\n options = format_html_join(\n \"\", \"<option>{}</option>\", ((feature,) for feature in sorted(unique_features))\n )\n return format_html('<datalist id=\"feature_datalist\">{}</datalist>', options)\n\n @property\n def css_icon(self):\n return \"fa-briefcase\"\n\n def get_spdx_packages(self):\n return list(self.productcomponents.catalogs()) + list(self.productpackages.all())\n\n def get_cyclonedx_components(self):\n return list(self.productcomponents.catalogs()) + list(self.productpackages.all())\n\n def assign_objects(self, related_objects, user):\n \"\"\"\n Assign provided `related_objects` to this `Product`.\n Supported object models are `Component` and `Package`.\n Return the both counts for created and unchanged objects.\n \"\"\"\n created_count = 0\n unchanged_count = 0\n\n relationship_models = {\n \"component\": ProductComponent,\n \"package\": ProductPackage,\n }\n\n for obj in related_objects:\n relation_model_name = obj._meta.model_name # 'component' or 'package'\n relation_model_class = relationship_models.get(relation_model_name)\n if not relation_model_class:\n continue\n\n filters = {\n \"product\": self,\n relation_model_name: obj,\n \"dataspace\": obj.dataspace,\n \"defaults\": {\n \"license_expression\": obj.license_expression,\n \"created_by\": user,\n \"last_modified_by\": user,\n },\n }\n relation_obj, created = relation_model_class.objects.get_or_create(**filters)\n if created:\n History.log_addition(user, relation_obj)\n History.log_change(user, self, f'Added {relation_model_name} \"{obj}\"')\n created_count += 1\n else:\n unchanged_count += 1\n\n if created_count:\n self.last_modified_by = user\n self.save()\n\n return created_count, unchanged_count\n\n def scan_all_packages_task(self, user):\n \"\"\"\n Submit a Scan request to ScanCode.io for each package assigned to this Product.\n Only packages with a proper download URL are sent.\n \"\"\"\n package_urls = [\n package.download_url\n for package in self.all_packages\n if package.download_url.startswith((\"http\", \"https\"))\n ]\n\n tasks.scancodeio_submit_scan.delay(\n uris=package_urls,\n user_uuid=user.uuid,\n dataspace_uuid=user.dataspace.uuid,\n )" }, { "identifier": "ProductComponent", "path": "product_portfolio/models.py", "snippet": "class ProductComponent(ProductRelationshipMixin):\n component = models.ForeignKey(\n to=\"component_catalog.Component\",\n null=True,\n blank=True,\n on_delete=models.PROTECT,\n related_name=\"productcomponents\",\n )\n\n # This should be on the ProductRelationshipMixin but for some reason\n # it makes test_productcomponent_import_license_expression fail\n # This license_expression is never generated but always stored.\n license_expression = models.CharField(\n _(\"License expression\"),\n max_length=1024,\n blank=True,\n db_index=True,\n help_text=_(\n \"On a product component relationship (which defines a component as used in your \"\n \"product), a license expression is limited by the license(s) assigned to the original \"\n 
\"component, and expresses the license(s) that apply to the context of that component \"\n \"as it is used by your product. More than one applicable license can be expressed as \"\n '\"license-key-a AND license-key-b\". A choice of licenses can be expressed as '\n '\"license-key-a OR license-key-b\", and you can indicate the primary license by '\n \"placing it first, on the left-hand side of the OR relationship. You can also assert \"\n \"your license choice for the component as used in your product by editing the license \"\n \"expression to remove any license keys that do not apply. The relationship words \"\n \"(OR, AND) can be combined as needed, and the use of parentheses can be applied to \"\n 'clarify the meaning; for example \"((license-key-a AND license-key-b) OR '\n '(license-key-c))\". An exception to a license can be expressed as \"license-key WITH '\n 'license-exception-key\".'\n ),\n )\n\n licenses = models.ManyToManyField(\n to=\"license_library.License\",\n through=\"ProductComponentAssignedLicense\",\n )\n\n # The following optional fields are are placeholders until the ProductComponent is validated.\n # When validated, the values of the Component FK are used instead.\n\n name = models.CharField(\n blank=True,\n max_length=70,\n validators=[validate_url_segment],\n help_text=_(\n \"A descriptive name for the Component (Component name). If you identified a \"\n 'DejaCode Component in the \"component\" field, this is not necessary; '\n \"otherwise, you should provide the name used by the authors of the \"\n \"component.\"\n ),\n )\n\n version = models.CharField(\n blank=True,\n max_length=50,\n validators=[validate_version],\n help_text=_(\n \"The version of the Component (Component version). If you identified a \"\n 'DejaCode Component in the \"component\" field, this is not necessary; '\n \"otherwise, you should provide the version used by the authors of the \"\n \"Component.\"\n ),\n )\n\n owner = models.CharField(\n blank=True,\n max_length=70,\n help_text=_(\n \"The creator, author, or source name of the Component. If you identified a \"\n 'DejaCode component in the \"component\" field, this is not necessary; '\n \"otherwise, you should provide the name of the owner as provided by that \"\n \"owner in the Component documentation.\"\n ),\n )\n\n copyright = models.TextField(\n blank=True,\n help_text=_(\n \"The copyright statement for this Component. If you identified a DejaCode \"\n 'Component in the \"component\" field, this is not necessary.'\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n blank=True,\n max_length=1024,\n help_text=_(\n \"URL to the source of the Component Package. If you identified a DejaCode \"\n 'Component in the \"component\" field, this is not necessary.'\n ),\n )\n\n download_url = models.CharField(\n _(\"Download URL\"),\n blank=True,\n max_length=1024,\n validators=[generic_uri_validator],\n help_text=_(\n \"URL to the source of the Component Package. Once validated this should point to a \"\n 'Package. 
If you identified a DejaCode Component in the \"component\" field, and if it '\n \"already has a Package defined with the download_url, then this is not necessary.\"\n ),\n )\n\n primary_language = models.CharField(\n db_index=True,\n max_length=50,\n blank=True,\n help_text=_(\"The primary programming language associated with the component.\"),\n )\n\n objects = DataspacedManager.from_queryset(ProductComponentQuerySet)()\n\n class Meta:\n verbose_name = _(\"product component relationship\")\n unique_together = ((\"product\", \"component\"), (\"dataspace\", \"uuid\"))\n ordering = [\"product\", \"component\"]\n permissions = (\n (\n \"change_review_status_on_productcomponent\",\n \"Can change the review_status of product component relationship\",\n ),\n )\n\n def __str__(self):\n if self.component:\n return str(self.component)\n if self.name or self.version:\n return f\"{self.name} {self.version}\"\n return \"(Component data missing)\" # a value is required for the changelist link\n\n @property\n def permission_protected_fields(self):\n return {\"review_status\": \"change_review_status_on_productcomponent\"}\n\n @property\n def is_custom_component(self):\n return not self.component_id\n\n @property\n def has_custom_values(self):\n custom_fields = [\n \"name\",\n \"value\",\n \"owner\",\n \"copyright\",\n \"homepage_url\",\n \"download_url\",\n \"primary_language\",\n ]\n return any(getattr(self, field, None) for field in custom_fields)" }, { "identifier": "ProductPackage", "path": "product_portfolio/models.py", "snippet": "class ProductPackage(ProductRelationshipMixin):\n package = models.ForeignKey(\n to=\"component_catalog.Package\",\n on_delete=models.PROTECT,\n related_name=\"productpackages\",\n )\n\n # This should be on the ComponentRelationshipMixin but for some reason\n # it makes test_productcomponent_import_license_expression fail\n # This license_expression is never generated but always stored.\n license_expression = models.CharField(\n _(\"License expression\"),\n max_length=1024,\n blank=True,\n db_index=True,\n help_text=_(\n \"On a product package relationship (which defines a package as used in your product), \"\n \"a license expression is limited by the license(s) assigned to the original package \"\n \"(unless none were assigned), and expresses the license(s) that apply to the context \"\n \"of that package as it is used by your product. More than one applicable license can \"\n 'be expressed as \"license-key-a AND license-key-b\". A choice of licenses can be '\n 'expressed as \"license-key-a OR license-key-b\", and you can indicate the primary '\n \"license by placing it first, on the left-hand side of the OR relationship. You can \"\n \"also assert your license choice for the package as used in your product by editing \"\n \"the license expression to remove any license keys that do not apply. The \"\n \"relationship words (OR, AND) can be combined as needed, and the use of parentheses \"\n 'can be applied to clarify the meaning; for example \"((license-key-a AND '\n 'license-key-b) OR (license-key-c))\". 
An exception to a license can be expressed '\n 'as \"license-key WITH license-exception-key\".'\n ),\n )\n\n licenses = models.ManyToManyField(\n to=\"license_library.License\",\n through=\"ProductPackageAssignedLicense\",\n )\n\n objects = DataspacedManager.from_queryset(ProductSecuredQuerySet)()\n\n class Meta:\n verbose_name = _(\"product package relationship\")\n unique_together = ((\"product\", \"package\"), (\"dataspace\", \"uuid\"))\n ordering = [\"product\", \"package\"]\n permissions = (\n (\n \"change_review_status_on_productpackage\",\n \"Can change the review_status of product package relationship\",\n ),\n )\n\n def __str__(self):\n return str(self.package)\n\n @property\n def permission_protected_fields(self):\n return {\"review_status\": \"change_review_status_on_productpackage\"}" }, { "identifier": "ProductStatus", "path": "product_portfolio/models.py", "snippet": "class ProductStatus(BaseStatusMixin, DataspacedModel):\n request_to_generate = models.ForeignKey(\n to=\"workflow.RequestTemplate\",\n on_delete=models.PROTECT,\n null=True,\n blank=True,\n limit_choices_to={\n \"content_type__app_label\": \"product_portfolio\",\n \"content_type__model\": \"product\",\n \"include_applies_to\": True,\n },\n help_text=_(\n \"Identify the product-based Request Template to use for generating \"\n \"a Request when a Product is set to this status. \"\n \"Note that this Template should not include any custom required \"\n \"fields, since DejaCode will be creating the Request automatically.\"\n ),\n )\n\n class Meta(BaseStatusMixin.Meta):\n verbose_name_plural = _(\"product status\")" } ]
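The license_expression help texts in the snippets above describe an SPDX-style syntax: AND combines licenses that all apply, OR expresses a choice (primary license first, on the left), WITH attaches an exception key, and parentheses group subexpressions. A minimal sketch of handling that syntax with the license-expression package (a nexB library; its use here is an illustrative assumption, not something this record shows):

# Sketch only: parsing the expression syntax described in the help texts.
# Assumes `pip install license-expression`; the keys below are the
# placeholder names from the help texts, not real license keys.
from license_expression import Licensing

licensing = Licensing()

# AND = all licenses apply; OR = a choice, with the primary license on the left.
expression = licensing.parse("(license-key-a AND license-key-b) OR license-key-c")
print(expression.render())                 # normalized expression text
print(licensing.license_keys(expression))  # every key used in the expression

# WITH attaches an exception key to a license key.
print(licensing.parse("license-key WITH license-exception-key").render())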
from django import forms from django.contrib import admin from django.utils.translation import gettext_lazy as _ from component_catalog.models import ComponentKeyword from component_catalog.programming_languages import PROGRAMMING_LANGUAGES from dje.filters import BooleanChoiceFilter from dje.filters import DataspacedFilterSet from dje.filters import DefaultOrderingFilter from dje.filters import HasRelationFilter from dje.filters import MatchOrderedSearchFilter from dje.filters import SearchFilter from dje.widgets import BootstrapSelectMultipleWidget from dje.widgets import DropDownWidget from license_library.models import License from product_portfolio.models import CodebaseResource from product_portfolio.models import Product from product_portfolio.models import ProductComponent from product_portfolio.models import ProductPackage from product_portfolio.models import ProductStatus import django_filters
14,960
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # class ProductFilterSet(DataspacedFilterSet): q = MatchOrderedSearchFilter( label=_("Search"), match_order_fields=[ "name", "components__name", "packages__filename", ], search_fields=[ "name", "version", "components__name", "packages__filename", ], distinct=True, widget=forms.widgets.HiddenInput, ) sort = DefaultOrderingFilter( label=_("Sort"), fields=[ "name", "version", "license_expression", "primary_language", "owner", "configuration_status", ], field_labels={ "primary_language": "Language", "configuration_status": "Configuration Status", }, empty_label="Default", ) configuration_status = django_filters.ModelMultipleChoiceFilter( label=_("Configuration status"), field_name="configuration_status__label", to_field_name="label", queryset=ProductStatus.objects.all(), widget=BootstrapSelectMultipleWidget( search=False, search_placeholder="Search configuration status", ), ) primary_language = django_filters.MultipleChoiceFilter( label=_("Language"), choices=[(language, language) for language in PROGRAMMING_LANGUAGES], widget=BootstrapSelectMultipleWidget( search_placeholder="Search languages", ), ) licenses = django_filters.ModelMultipleChoiceFilter( label=_("License"), field_name="licenses__key", to_field_name="key", queryset=License.objects.all(), widget=BootstrapSelectMultipleWidget( search_placeholder="Search licenses", ), ) keywords = django_filters.ModelMultipleChoiceFilter( label=_("Keyword"), to_field_name="label", lookup_expr="contains", queryset=ComponentKeyword.objects.all().only("label", "dataspace"), widget=BootstrapSelectMultipleWidget( search_placeholder="Search keywords", ), ) class Meta: model = Product fields = [ "q", "licenses", "primary_language", "configuration_status", "keywords", ] class BaseProductRelationFilterSet(DataspacedFilterSet): is_deployed = BooleanChoiceFilter( empty_label="All (Inventory)", choices=( ("yes", _("Yes (BOM)")), ("no", _("No (Internal Use Only)")), ),
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # class ProductFilterSet(DataspacedFilterSet): q = MatchOrderedSearchFilter( label=_("Search"), match_order_fields=[ "name", "components__name", "packages__filename", ], search_fields=[ "name", "version", "components__name", "packages__filename", ], distinct=True, widget=forms.widgets.HiddenInput, ) sort = DefaultOrderingFilter( label=_("Sort"), fields=[ "name", "version", "license_expression", "primary_language", "owner", "configuration_status", ], field_labels={ "primary_language": "Language", "configuration_status": "Configuration Status", }, empty_label="Default", ) configuration_status = django_filters.ModelMultipleChoiceFilter( label=_("Configuration status"), field_name="configuration_status__label", to_field_name="label", queryset=ProductStatus.objects.all(), widget=BootstrapSelectMultipleWidget( search=False, search_placeholder="Search configuration status", ), ) primary_language = django_filters.MultipleChoiceFilter( label=_("Language"), choices=[(language, language) for language in PROGRAMMING_LANGUAGES], widget=BootstrapSelectMultipleWidget( search_placeholder="Search languages", ), ) licenses = django_filters.ModelMultipleChoiceFilter( label=_("License"), field_name="licenses__key", to_field_name="key", queryset=License.objects.all(), widget=BootstrapSelectMultipleWidget( search_placeholder="Search licenses", ), ) keywords = django_filters.ModelMultipleChoiceFilter( label=_("Keyword"), to_field_name="label", lookup_expr="contains", queryset=ComponentKeyword.objects.all().only("label", "dataspace"), widget=BootstrapSelectMultipleWidget( search_placeholder="Search keywords", ), ) class Meta: model = Product fields = [ "q", "licenses", "primary_language", "configuration_status", "keywords", ] class BaseProductRelationFilterSet(DataspacedFilterSet): is_deployed = BooleanChoiceFilter( empty_label="All (Inventory)", choices=( ("yes", _("Yes (BOM)")), ("no", _("No (Internal Use Only)")), ),
widget=DropDownWidget(
9
2023-12-07 16:57:42+00:00
24k
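The FilterSet classes in this record's code follow the standard django-filter pattern: instantiate the filter with the request's GET parameters and a queryset, then read the filtered results from the qs attribute. A hypothetical usage sketch (the view, import path, and template name are illustrative, and the DataspacedFilterSet base class may require extra arguments, such as the current dataspace, that are not shown here):

# Sketch only: binding a django-filter FilterSet like ProductFilterSet in a view.
from django.shortcuts import render

from product_portfolio.filters import ProductFilterSet  # hypothetical import path
from product_portfolio.models import Product

def product_list(request):
    # Bind GET parameters (q, licenses, primary_language, ...) to the filters.
    filterset = ProductFilterSet(request.GET, queryset=Product.objects.all())
    # filterset.qs applies only the filters actually present in the request.
    return render(request, "product_list.html", {"filterset": filterset})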
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: int,\n config: PretrainedConfig = None,\n pooler_type: str = None,\n proj: str = None,\n pretrained: bool = True,\n output_tokens: bool = False,\n ):\n super().__init__()\n self.output_tokens = output_tokens\n self.output_dim = output_dim\n\n # TODO: find better way to get this information\n uses_transformer_pooler = (pooler_type == \"cls_pooler\")\n\n if transformers is None:\n raise RuntimeError(\"Please `pip install transformers` to use pre-trained HuggingFace models\")\n if config is None:\n self.config = AutoConfig.from_pretrained(model_name_or_path)\n create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (\n AutoModel.from_config, self.config)\n # TODO: do all model configs have this attribute? PretrainedConfig does so yes??\n if hasattr(self.config, \"is_encoder_decoder\") and self.config.is_encoder_decoder:\n self.transformer = create_func(model_args)\n self.transformer = self.transformer.encoder\n else:\n self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)\n else:\n self.config = config\n self.transformer = AutoModel.from_config(config)\n if pooler_type is None: # get default arch pooler\n pooler_type = (arch_dict[self.config.model_type][\"pooler\"])\n \n self.pooler = _POOLERS[pooler_type]()\n\n d_model = getattr(self.config, arch_dict[self.config.model_type][\"config_names\"][\"width\"])\n if (d_model == output_dim) and (proj is None): # do we always need a proj?\n self.proj = nn.Identity()\n elif proj == 'linear':\n self.proj = nn.Linear(d_model, output_dim, bias=False)\n elif proj == 'mlp':\n hidden_size = (d_model + output_dim) // 2\n self.proj = nn.Sequential(\n nn.Linear(d_model, hidden_size, bias=False),\n nn.GELU(),\n nn.Linear(hidden_size, output_dim, bias=False),\n )\n\n def forward(self, x: TensorType):\n attn_mask = (x != self.config.pad_token_id).long()\n out = self.transformer(input_ids=x, attention_mask=attn_mask)\n pooled_out = self.pooler(out, attn_mask)\n projected = self.proj(pooled_out)\n\n seq_len = out.last_hidden_state.shape[1]\n tokens = (\n out.last_hidden_state[:, torch.arange(seq_len) != self.pooler.cls_token_position, :] \n if type(self.pooler) == ClsPooler \n else out.last_hidden_state\n )\n \n if self.output_tokens:\n return projected, tokens\n return projected\n\n def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n if not unlocked_layers: # full freezing\n for n, p in self.transformer.named_parameters():\n p.requires_grad = (not freeze_layer_norm) if \"LayerNorm\" in n.split(\".\") else False\n return\n\n encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer\n layer_list = getattr(encoder, arch_dict[self.config.model_type][\"config_names\"][\"layer_attr\"])\n print(f\"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model\")\n embeddings = getattr(\n self.transformer, arch_dict[self.config.model_type][\"config_names\"][\"token_embeddings_attr\"])\n modules = [embeddings, *layer_list][:-unlocked_layers]\n # freeze layers\n for module in modules:\n for n, p in module.named_parameters():\n p.requires_grad = (not freeze_layer_norm) if \"LayerNorm\" in n.split(\".\") else False\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, 
enable=True):\n self.transformer.gradient_checkpointing_enable()\n\n def init_parameters(self):\n pass" }, { "identifier": "ModifiedResNet", "path": "src/open_clip/modified_resnet.py", "snippet": "class ModifiedResNet(nn.Module):\n \"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention instead of an average pool\n \"\"\"\n\n def __init__(self, layers, output_dim, heads, image_size=224, width=64,\n freeze_output=True,\n freeze_all_bns=True):\n super().__init__()\n self.output_dim = output_dim\n self.image_size = image_size\n self.freeze_output = freeze_output\n self.freeze_all_bns = freeze_all_bns\n # the 3-layer stem\n self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width // 2)\n self.act1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(width // 2)\n self.act2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(width)\n self.act3 = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(2)\n\n # residual layers\n self._inplanes = width # this is a *mutable* variable used during construction\n self.layer1 = self._make_layer(width, layers[0])\n self.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n embed_dim = width * 32 # the ResNet feature dimension\n self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim, freeze_output)\n self.attnpool_input_size = image_size // 32\n\n def _make_layer(self, planes, blocks, stride=1):\n layers = [Bottleneck(self._inplanes, planes, stride)]\n\n self._inplanes = planes * Bottleneck.expansion\n for _ in range(1, blocks):\n layers.append(Bottleneck(self._inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=True):\n assert freeze_bn_stats\n def _lock(module):\n for param in module.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(module)\n module.eval()\n\n freeze_at = 5 - unlocked_groups\n print(f'Freeze the resnet at {freeze_at}', flush=True)\n\n if freeze_at >= 1: # stem\n _lock(self.conv1)\n _lock(self.bn1)\n _lock(self.conv2)\n _lock(self.bn2)\n _lock(self.conv3)\n _lock(self.bn3)\n # each stage is a torch.nn.modules.container.Sequential\n for idx, stage in enumerate([self.layer1, self.layer2, self.layer3, self.layer4], start=2):\n if freeze_at >= idx:\n for block in stage.children(): # each block is a Bottleneck\n _lock(block)\n if self.freeze_all_bns:\n print(f'Freeze all bn layers', flush=True) # TODO: study if this is necessary\n freeze_batch_norm_2d(self)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n # FIXME support for non-transformer\n pass\n\n def stem(self, x):\n x = self.act1(self.bn1(self.conv1(x)))\n x = self.act2(self.bn2(self.conv2(x)))\n x = self.act3(self.bn3(self.conv3(x)))\n x = self.avgpool(x)\n return x\n\n def forward(self, x):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = 
self.layer3(x)\n x = self.layer4(x)\n x = self.attnpool(x)\n\n return x\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v2'):\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n else:\n assert extract_type == 'v2'\n return self._extract_roi_features_v2(x, normed_boxes)\n\n def mask_attn_pool(self, image, masks):\n return self.mask_pool(image, masks)\n\n def mask_pool(self, image, masks):\n x = self.stem(image)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n feature_map = self.attnpool.forward_dense(x)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n\n feature_map = feature_map.flatten(-2, -1) # bs, c, h*w\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks[:, None]).sum(-1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def _extract_roi_features_v1(self, x, normed_boxes, **kwargs):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.attnpool.forward_dense(x)\n x = F.normalize(x, dim=1) # remember to normalize!\n # TODO: debug\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (1, 1), 1.0, -1, True)[:, :, 0, 0]\n return roi_feats\n\n def _extract_roi_features_v2(self, x, normed_boxes, **kwargs):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x) # only the last layer is finetuned in our implementation\n\n tar_size = self.attnpool_input_size\n # TODO: debug\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (tar_size, tar_size), 1.0, -1, True)\n\n roi_feats = self.attnpool(roi_feats)\n\n return roi_feats\n\n def encode_dense(self, x, keep_shape=True):\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n feature_map = self.attnpool.forward_dense(x)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n\n return feature_map" }, { "identifier": "TimmModel", "path": "src/open_clip/timm_model.py", "snippet": "class TimmModel(nn.Module):\n \"\"\" timm model adapter\n \"\"\"\n\n def __init__(\n self,\n model_name,\n embed_dim,\n image_size=224,\n pool='avg',\n proj='linear',\n proj_bias=False,\n drop=0.,\n drop_path=None,\n patch_drop=None,\n pretrained=False,\n ):\n super().__init__()\n if timm is None:\n raise RuntimeError(\"Please `pip install timm` to use timm models.\")\n self.image_size = to_2tuple(image_size)\n\n # setup kwargs that may not be common across all models\n timm_kwargs = {}\n if drop_path is not None:\n timm_kwargs['drop_path_rate'] = drop_path\n if patch_drop is not None:\n timm_kwargs['patch_drop_rate'] = patch_drop\n\n custom_pool = pool in ('abs_attn', 'rot_attn')\n if not proj and not custom_pool:\n # use network classifier head as projection if no proj specified and no custom pooling used\n self.trunk = 
timm.create_model(\n model_name,\n num_classes=embed_dim,\n global_pool=pool,\n pretrained=pretrained,\n **timm_kwargs,\n )\n prev_chs = embed_dim\n else:\n self.trunk = timm.create_model(\n model_name,\n pretrained=pretrained,\n **timm_kwargs,\n )\n feat_size = self.trunk.default_cfg.get('pool_size', None)\n feature_ndim = 1 if not feat_size else 2\n if custom_pool:\n assert feature_ndim == 2\n # if attn pooling used, remove both classifier and default pool\n self.trunk.reset_classifier(0, global_pool='')\n else:\n # reset global pool if pool config set, otherwise leave as network default\n reset_kwargs = dict(global_pool=pool) if pool else {}\n self.trunk.reset_classifier(0, **reset_kwargs)\n prev_chs = self.trunk.num_features\n\n head_layers = OrderedDict()\n\n # Add custom pooling to head\n if pool == 'abs_attn':\n head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)\n prev_chs = embed_dim\n elif pool == 'rot_attn':\n head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)\n prev_chs = embed_dim\n\n # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used\n if proj == 'linear':\n head_layers['drop'] = nn.Dropout(drop)\n head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)\n elif proj == 'mlp':\n head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=(drop, 0), bias=(True, proj_bias))\n else:\n assert not proj, f'Unknown projection type {proj}.'\n\n self.head = nn.Sequential(head_layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n \"\"\" lock modules\n Args:\n unlocked_groups (int): leave last n layer groups unlocked (default: 0)\n \"\"\"\n if not unlocked_groups:\n # lock full model\n for param in self.trunk.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(self.trunk)\n else:\n # NOTE: partial freeze requires latest timm (master) branch and is subject to change\n try:\n # FIXME import here until API stable and in an official release\n from timm.models.helpers import group_parameters, group_modules\n except ImportError:\n raise RuntimeError(\n 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')\n matcher = self.trunk.group_matcher()\n gparams = group_parameters(self.trunk, matcher)\n max_layer_id = max(gparams.keys())\n max_layer_id = max_layer_id - unlocked_groups\n for group_idx in range(max_layer_id + 1):\n group = gparams[group_idx]\n for param in group:\n self.trunk.get_parameter(param).requires_grad = False\n if freeze_bn_stats:\n gmodules = group_modules(self.trunk, matcher, reverse=True)\n gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}\n freeze_batch_norm_2d(self.trunk, gmodules)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n try:\n self.trunk.set_grad_checkpointing(enable)\n except Exception as e:\n logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')\n\n def forward(self, x):\n x = self.trunk(x)\n x = self.head(x)\n return x\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def _extract_roi_features_v1(self, x, normed_boxes, **kwargs):\n h, w = x.shape[-2:]\n x = 
self.trunk.forward_features(x)\n h_f, w_f = x.shape[-2:]\n tar_h = (self.image_size[0] * h_f) // h\n tar_w = (self.image_size[1] * w_f) // w\n x = roi_align(x, self._denormalize_boxes(normed_boxes, x), (tar_h, tar_w),\n 1.0, -1, True)\n\n x = self.trunk.forward_head(x)\n x = self.head(x)\n\n return x\n\n def encode_dense(self, x, **kwargs):\n x = self.trunk.forward_features(x)\n x = self.dense_trunk_head(x)\n x = self.head(x)\n x = x.permute(0, 3, 1, 2)\n\n return x\n\n def dense_trunk_head(self, x):\n x = self.trunk.head.norm(x)\n x = x.permute(0, 2, 3, 1)\n x = self.trunk.head.drop(x)\n # x = x.permute(0, 3, 1, 2)\n\n return x\n\n def mask_pool(self, image, masks):\n feature_map = self.encode_dense(image)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n feature_map = feature_map.flatten(-2, -1) # bs, c, h*w\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks[:, None]).sum(-1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v1'):\n assert extract_type == \"v1\"\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n else:\n assert extract_type == 'v2'\n return self._extract_roi_features_v2(x, normed_boxes)\n\n def _extract_roi_features_v2(self, x, normed_boxes, **kwargs):\n x = self.encode_dense(x)\n x = F.normalize(x, dim=1) # remember to normalize!\n\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x), (1, 1),\n 1.0, -1, True)[..., 0, 0]\n return roi_feats\n\n def encode_rois_and_image(self, x, normed_boxes, **kwargs):\n h, w = x.shape[-2:]\n x = self.trunk.forward_features(x)\n h_f, w_f = x.shape[-2:]\n tar_h = (self.image_size[0] * h_f) // h\n tar_w = (self.image_size[1] * w_f) // w\n x_image = x\n x_rois = roi_align(x, self._denormalize_boxes(normed_boxes, x), (tar_h, tar_w),\n 1.0, -1, True)\n\n x_rois = self.trunk.forward_head(x_rois)\n x_rois = self.head(x_rois)\n x_rois = F.normalize(x_rois, dim=-1)\n\n x_image = self.trunk.forward_head(x_image)\n x_image = self.head(x_image)\n x_image = F.normalize(x_image, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "LayerNormFp32", "path": "src/open_clip/transformer.py", "snippet": "class LayerNormFp32(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "LayerNorm", "path": "src/open_clip/transformer.py", "snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm (with cast back to input dtype).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "QuickGELU", "path": "src/open_clip/transformer.py", "snippet": "class QuickGELU(nn.Module):\n # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)" }, { "identifier": "Attention", "path": "src/open_clip/transformer.py", "snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim,\n 
num_heads=8,\n qkv_bias=True,\n scaled_cosine=False,\n scale_heads=False,\n logit_scale_max=math.log(1. / 0.01),\n attn_drop=0.,\n proj_drop=0.\n ):\n super().__init__()\n self.scaled_cosine = scaled_cosine\n self.scale_heads = scale_heads\n assert dim % num_heads == 0, 'dim should be divisible by num_heads'\n self.num_heads = num_heads\n self.head_dim = dim // num_heads\n self.scale = self.head_dim ** -0.5\n self.logit_scale_max = logit_scale_max\n\n # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original\n self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)\n if qkv_bias:\n self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))\n else:\n self.in_proj_bias = None\n\n if self.scaled_cosine:\n self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))\n else:\n self.logit_scale = None\n self.attn_drop = nn.Dropout(attn_drop)\n if self.scale_heads:\n self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))\n else:\n self.head_scale = None\n self.out_proj = nn.Linear(dim, dim)\n self.out_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, attn_mask: Optional[torch.Tensor] = None):\n L, N, C = x.shape\n q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)\n q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n\n if self.logit_scale is not None:\n attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))\n logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()\n attn = attn.view(N, self.num_heads, L, L) * logit_scale\n attn = attn.view(-1, L, L)\n else:\n q = q * self.scale\n attn = torch.bmm(q, k.transpose(-1, -2))\n\n if attn_mask is not None:\n if attn_mask.dtype == torch.bool:\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n attn += attn_mask\n\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = torch.bmm(attn, v)\n if self.head_scale is not None:\n x = x.view(N, self.num_heads, L, C) * self.head_scale\n x = x.view(-1, L, C)\n x = x.transpose(0, 1).reshape(L, N, C)\n x = self.out_proj(x)\n x = self.out_drop(x)\n return x" }, { "identifier": "VisionTransformer", "path": "src/open_clip/transformer.py", "snippet": "class VisionTransformer(nn.Module):\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n ls_init_value: float = None,\n global_average_pool: bool = False,\n attentional_pool: bool = False,\n n_queries: int = 256,\n attn_pooler_heads: int = 8,\n output_dim: int = 512,\n patch_dropout: float = 0.,\n input_patchnorm: bool = False,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n output_tokens: bool = False\n ):\n super().__init__()\n self.output_tokens = output_tokens\n image_height, image_width = self.image_size = to_2tuple(image_size)\n patch_height, patch_width = self.patch_size = to_2tuple(patch_size)\n self.grid_size = (image_height // patch_height, image_width // patch_width)\n self.output_dim = output_dim\n\n # whether to layernorm each patch, as done in dual patchnorm paper - https://arxiv.org/abs/2302.01327v1\n self.input_patchnorm = input_patchnorm\n assert not input_patchnorm\n if input_patchnorm:\n patch_input_dim = 
patch_height * patch_width * 3\n self.patchnorm_pre_ln = LayerNorm(patch_input_dim)\n self.conv1 = nn.Linear(patch_input_dim, width)\n else:\n self.patchnorm_pre_ln = nn.Identity()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n # class embeddings and positional embeddings\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))\n\n # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn\n self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()\n\n self.ln_pre = norm_layer(width)\n self.transformer = Transformer(\n width,\n layers,\n heads,\n mlp_ratio,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.num_heads = heads\n\n self.global_average_pool = global_average_pool\n if attentional_pool:\n self.attn_pool = AttentionalPooler(output_dim, width, n_head=attn_pooler_heads, n_queries=n_queries)\n self.ln_post = norm_layer(output_dim)\n self.proj = nn.Parameter(scale * torch.randn(output_dim, output_dim))\n else:\n self.attn_pool = None\n self.ln_post = norm_layer(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.init_parameters()\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n for param in self.parameters():\n param.requires_grad = False\n\n if unlocked_groups != 0:\n groups = [\n [\n self.conv1,\n self.class_embedding,\n self.ln_pre,\n ],\n self.positional_embedding,\n *self.transformer.resblocks[:-1],\n [\n self.transformer.resblocks[-1],\n # self.ln_post, # fix layer norm\n ],\n # self.proj, # fix output layers\n ]\n\n def _unlock(x):\n if isinstance(x, Sequence):\n for g in x:\n _unlock(g)\n else:\n if isinstance(x, torch.nn.Parameter):\n x.requires_grad = True\n else:\n for p in x.parameters():\n p.requires_grad = True\n\n _unlock(groups[-unlocked_groups:])\n\n def attention_lock(self, **kwargs):\n for name, params in self.named_parameters():\n params.requires_grad = True if \"attn\" in name or \"position\" in name else False\n\n def init_parameters(self):\n # FIXME OpenAI CLIP did not define an init for the VisualTransformer\n # TODO experiment if default PyTorch init, below, or alternate init is best.\n pass\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.global_average_pool:\n return x.mean(dim=1), x\n else:\n return x[:, 0], x[:, 1:]\n\n def forward(self, x: torch.Tensor):\n\n # to patches - whether to use dual patchnorm - https://arxiv.org/abs/2302.01327v1\n # if self.input_patchnorm:\n # # einops - rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)')\n # x = x.reshape(x.shape[0], x.shape[1], self.grid_size[0], self.patch_size[0], self.grid_size[1], self.patch_size[1])\n # x = x.permute(0, 2, 4, 1, 3, 5)\n # x = x.reshape(x.shape[0], self.grid_size[0] * self.grid_size[1], -1)\n # x = self.patchnorm_pre_ln(x)\n # x = self.conv1(x)\n # else:\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n 
[self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n # TODO: Allow interpolating the positional embeddings\n\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n if self.output_tokens:\n return pooled, tokens\n \n return pooled\n\n def post_attention(self, x):\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n if self.output_tokens:\n return pooled, tokens\n\n return pooled\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v2'):\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n elif extract_type == 'v2':\n return self._extract_roi_features_v2(x, normed_boxes)\n else:\n raise NotImplementedError\n # assert extract_type == 'v3'\n # return self._extract_roi_features_v3(x, normed_boxes)\n\n def mask_pool(self, x, masks):\n feature_map = self.encode_dense(x)\n feature_map = F.normalize(feature_map, dim=-1)\n\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks.unsqueeze(-1)).sum(1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def mask_features(self, x, masks):\n feature_map = self.encode_dense(x)\n feature_map = F.normalize(feature_map, dim=-1)\n\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).flatten(-2, -1) > 0 # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n\n mask_features = [f[m] for m, f in zip(masks, feature_map)]\n\n return mask_features\n\n def encode_dense(self, x, keep_shape=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer.extract_feature_map(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n\n feature_map = tokens.view(bs, h * w, -1) # .permute(0, 3, 1, 2)\n feature_map = F.normalize(feature_map, dim=-1) # normalize at the last dimension\n if keep_shape:\n feature_map = feature_map.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return feature_map\n\n def mask_crop(self, x, masks):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).to(x) # bs, h, w\n x = torch.repeat_interleave(\n x, torch.tensor(num_masks_per_image, device=x.device), dim=0)\n x = x * masks[:, None]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n # TODO: Allow interpolating the positional embeddings\n\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n return pooled\n\n @staticmethod\n def _generate_masks_per_image(normed_boxes, mask_h, mask_w):\n num_boxes = len(normed_boxes)\n boxes = normed_boxes * torch.tensor(\n [[mask_w, mask_h, mask_w, mask_h]], device=normed_boxes.device)\n masks = torch.zeros(num_boxes, mask_h, mask_w,\n dtype=torch.bool, device=normed_boxes.device)\n for i, box in enumerate(boxes):\n x0, y0, x1, y1 = box.long().tolist()\n masks[i, y0:y1, x0:x1] = True\n\n return masks\n \n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def _extract_roi_features_v1(self, x, normed_boxes):\n # used masks\n bs, _, h, w = x.shape\n patch_height, patch_width = self.patch_size\n mask_h, mask_w = h // patch_height, w // patch_width\n masks = [self._generate_masks_per_image(normed_boxes_, mask_h, mask_w)\n for normed_boxes_ in normed_boxes]\n\n return self.mask_attn_pool(x, masks)\n\n def _extract_roi_features_v3(self, x, normed_boxes): # v3 for extract two types\n # used masks\n bs, _, h, w = x.shape\n patch_height, patch_width = self.patch_size\n mask_h, mask_w = h // patch_height, w // patch_width\n masks = [self._generate_masks_per_image(normed_boxes_, mask_h, 
mask_w)\n for normed_boxes_ in normed_boxes]\n\n roi_features_v1, dense_x = self.mask_attn_pool(x, masks, return_dense=True)\n dense_x = F.normalize(dense_x, dim=-1) # normalize along last dimension\n dense_x = dense_x.permute(0, 3, 1, 2)\n roi_features_v2 = roi_align(dense_x, self._denormalize_boxes(normed_boxes, dense_x), \n (1, 1), 1.0, -1, True)[..., 0, 0]\n\n return roi_features_v1, roi_features_v2\n\n def _extract_roi_features_v2(self, x, normed_boxes):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer.extract_feature_map(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n tokens = F.normalize(tokens, dim=-1) # normalize along last dimension\n tokens = tokens.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return roi_align(tokens, self._denormalize_boxes(normed_boxes, tokens),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n\n def rescale_positional_embedding(self, out_size, dtype):\n h, w = out_size\n rescaled_positional_embedding = \\\n self.positional_embedding.new_zeros(1 + h*w, self.positional_embedding.shape[1])\n rescaled_positional_embedding[0] = self.positional_embedding[0]\n pe_2d = self.positional_embedding[1:].T.contiguous().view(\n 1, -1, *self.grid_size)\n pe_2d = F.interpolate(pe_2d, out_size, mode='bicubic', align_corners=False).view(-1, h*w)\n rescaled_positional_embedding[1:] = pe_2d.T.contiguous()\n\n return rescaled_positional_embedding.to(dtype=dtype)\n\n def _mask_attn_pool(self, x: torch.Tensor, attn_mask: torch.Tensor, num_mask_tokens: int, return_dense=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [\n self.class_embedding.to(x.dtype)\n + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x,\n ],\n dim=1,\n ) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n cls_embed = x[0:1]\n cls_embed = cls_embed.expand(num_mask_tokens, -1, -1)\n x = torch.cat([cls_embed, x], dim=0)\n if return_dense:\n x, x_dense = self.transformer.forward_image_dense(x, attn_mask)\n x_dense = x_dense.permute(1, 0, 2) # LND -> NLD\n x_dense = x_dense[:, num_mask_tokens + 1:]\n\n x_dense = self.ln_post(x_dense)\n\n if 
self.proj is not None:\n x_dense = x_dense @ self.proj\n x_dense = F.normalize(x_dense, dim=-1) # normalize along last dimension\n x_dense = x_dense.view(bs, h, w, -1)\n else:\n x = self.transformer(x, attn_mask)\n x_dense = None\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # [N, L, D]\n x = self.ln_post(x[:, :num_mask_tokens, :])\n\n if self.proj is not None:\n x = torch.einsum(\"nld,dc->nlc\", x, self.proj)\n\n return x, x_dense\n\n def mask_attn_pool(self, image, masks, return_dense=False):\n assert hasattr(self, \"positional_embedding\")\n batch_size = image.shape[0]\n assert batch_size == len(masks)\n num_masks_per_image = [mask.shape[0] for mask in masks]\n num_queries = max(num_masks_per_image)\n mask_h, mask_w = masks[0].shape[1:]\n\n batch_masks = torch.ones(batch_size, num_queries, mask_h, mask_w, dtype=torch.bool).to(image.device)\n for batch_id, mask in enumerate(masks):\n batch_masks[batch_id, :mask.shape[0]] = mask\n\n mask_token_attn_mask = torch.logical_not(batch_masks)\n # [B, Q, H//P x W//P]\n mask_token_attn_mask = mask_token_attn_mask.reshape(batch_size, num_queries, -1)\n\n num_mask_token = num_queries\n num_image_cls_token = (mask_h * mask_w + 1)\n num_image_token = num_image_cls_token - 1\n num_all_token = num_mask_token + num_image_cls_token\n\n # we start with no mask out\n attn_mask = torch.zeros(\n (num_all_token, num_all_token), dtype=torch.bool, device=image.device\n )\n\n # mask+cls+image token to mask token attention is masked out\n attn_mask[:, :num_mask_token] = True\n\n attn_mask = attn_mask.unsqueeze(0).repeat_interleave(batch_size, dim=0)\n attn_mask[:, :num_mask_token, -num_image_token:] = mask_token_attn_mask\n num_heads = self.num_heads # head width 64\n attn_mask = attn_mask.unsqueeze(1).expand(-1, num_heads, -1, -1)\n attn_mask = attn_mask.reshape(batch_size * num_heads, num_all_token, num_all_token)\n\n batch_mask_features, x_dense = self._mask_attn_pool(image, attn_mask, num_mask_token,\n return_dense=return_dense)\n\n mask_features = [batch_mask_features[batch_id, :num_masks]\n for batch_id, num_masks, in enumerate(num_masks_per_image)]\n if return_dense:\n # x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c\n # masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n # x_dense = torch.repeat_interleave(\n # x_dense, torch.tensor(num_masks_per_image, device=x_dense.device), dim=0)\n # x_dense = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True)\n\n return torch.cat(mask_features), x_dense\n else:\n return torch.cat(mask_features)\n\n def encode_rois_and_image(self, x, normed_boxes):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x, x_image = self.transformer.extract_feature_map(x, return_forward=True)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n\n feature_map = tokens.view(bs, h * w, -1) # .permute(0, 3, 1, 2)\n feature_map = F.normalize(feature_map, dim=-1)\n feature_map = feature_map.view(bs, h, w, -1).permute(0, 3, 1, 2)\n x_rois = roi_align(feature_map, self._denormalize_boxes(normed_boxes, feature_map),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n x_rois = F.normalize(x_rois, dim=-1)\n\n x_image = self.post_attention(x_image)\n x_image = F.normalize(x_image, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "TextTransformer", "path": "src/open_clip/transformer.py", "snippet": "class TextTransformer(nn.Module):\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n context_length: int = 77,\n vocab_size: int = 49408,\n width: int = 512,\n heads: int = 8,\n layers: int = 12,\n ls_init_value: float = None,\n output_dim: int = 512,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n embed_cls: bool = False,\n pad_id: int = 0,\n output_tokens: bool = False,\n ):\n super().__init__()\n self.output_tokens = output_tokens\n self.num_pos = self.context_length = context_length\n self.vocab_size = vocab_size\n self.width = width\n self.output_dim = output_dim\n self.heads = heads\n self.pad_id = pad_id\n\n self.text_projection = nn.Parameter(torch.empty(width, output_dim))\n\n if embed_cls:\n self.cls_emb = nn.Parameter(torch.empty(width))\n self.num_pos += 1\n else:\n self.cls_emb = None\n\n self.token_embedding = nn.Embedding(vocab_size, width)\n self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))\n self.transformer = Transformer(\n width=width,\n layers=layers,\n heads=heads,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.ln_final = norm_layer(width)\n\n self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)\n\n self.init_parameters()\n\n def init_parameters(self):\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n if self.cls_emb is not None:\n nn.init.normal_(self.cls_emb, std=0.01)\n\n proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)\n\n def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n assert unlocked_layers == 0 and freeze_layer_norm\n print(f'Freeze the text encoder', flush=True)\n for p in self.parameters():\n p.requires_grad = False\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n def 
build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.num_pos, self.num_pos)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def build_cls_mask(self, text, cast_dtype: torch.dtype):\n cls_mask = (text != self.pad_id).unsqueeze(1)\n cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=1.0)\n additive_mask = torch.empty(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)\n additive_mask.fill_(0)\n additive_mask.masked_fill_(~cls_mask, float(\"-inf\"))\n additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)\n return additive_mask\n\n def _repeat(self, t, N: int):\n return t.reshape(1, 1, -1).repeat(N, 1, 1)\n\n def forward(self, text):\n cast_dtype = self.transformer.get_cast_dtype()\n seq_len = text.shape[1]\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n attn_mask = self.attn_mask\n if self.cls_emb is not None:\n seq_len += 1\n x = torch.cat([x, self._repeat(self.cls_emb, x.shape[0])], dim=1)\n cls_mask = self.build_cls_mask(text, cast_dtype)\n attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]\n\n x = x + self.positional_embedding[:seq_len].to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n if self.cls_emb is not None:\n pooled, tokens = x[:, -1], x[:, :-1]\n pooled = self.ln_final(pooled)\n else:\n x = self.ln_final(x)\n pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x\n\n if self.text_projection is not None:\n pooled = pooled @ self.text_projection\n\n if self.output_tokens:\n return pooled, tokens\n\n return pooled" }, { "identifier": "to_2tuple", "path": "src/open_clip/utils.py", "snippet": "def freeze_batch_norm_2d(module, module_match={}, name=''):\ndef _ntuple(n):\n def parse(x):" } ]
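Several snippets above resize CLIP's learned positional embeddings through rescale_positional_embedding when the input grid differs from the training grid: the class-token embedding is kept, the patch embeddings are reshaped to a 2D grid, interpolated bicubically, and flattened back. A self-contained sketch of that technique, with illustrative shapes:

# Sketch of the positional-embedding rescaling used above (bicubic
# interpolation of the patch grid; the class token at index 0 is kept).
import torch
import torch.nn.functional as F

def rescale_pos_embed(pos_embed, grid_size, out_size):
    # pos_embed: (1 + H*W, C), class token first.
    cls_pe, patch_pe = pos_embed[:1], pos_embed[1:]
    c = patch_pe.shape[1]
    pe_2d = patch_pe.T.reshape(1, c, *grid_size)            # (1, C, H, W)
    pe_2d = F.interpolate(pe_2d, out_size, mode="bicubic", align_corners=False)
    patch_pe = pe_2d.reshape(c, -1).T                       # (h*w, C)
    return torch.cat([cls_pe, patch_pe], dim=0)

pe = torch.randn(1 + 14 * 14, 768)                          # e.g. ViT-B/16 at 224px
print(rescale_pos_embed(pe, (14, 14), (20, 20)).shape)      # torch.Size([401, 768])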
from dataclasses import dataclass from typing import Optional, Tuple, Union from torch import nn from torch.utils.checkpoint import checkpoint from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from .utils import to_2tuple import logging import math import numpy as np import torch import torch.nn.functional as F
16,255
timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm visual = VisionTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, ls_init_value=vision_cfg.ls_init_value, patch_dropout=vision_cfg.patch_dropout, input_patchnorm=vision_cfg.input_patchnorm, global_average_pool=vision_cfg.global_average_pool, attentional_pool=vision_cfg.attentional_pool, n_queries=vision_cfg.n_queries, attn_pooler_heads=vision_cfg.attn_pooler_heads, output_tokens=vision_cfg.output_tokens, output_dim=embed_dim, act_layer=act_layer, norm_layer=norm_layer, ) return visual def _build_text_tower( embed_dim: int, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, ): if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) if text_cfg.hf_model_name: text = HFTextEncoder( text_cfg.hf_model_name, output_dim=embed_dim, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, pretrained=text_cfg.hf_model_pretrained, output_tokens=text_cfg.output_tokens, ) else: act_layer = QuickGELU if quick_gelu else nn.GELU norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm visual = VisionTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, ls_init_value=vision_cfg.ls_init_value, patch_dropout=vision_cfg.patch_dropout, input_patchnorm=vision_cfg.input_patchnorm, global_average_pool=vision_cfg.global_average_pool, attentional_pool=vision_cfg.attentional_pool, n_queries=vision_cfg.n_queries, attn_pooler_heads=vision_cfg.attn_pooler_heads, output_tokens=vision_cfg.output_tokens, output_dim=embed_dim, act_layer=act_layer, norm_layer=norm_layer, ) return visual def _build_text_tower( embed_dim: int, text_cfg: CLIPTextCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None, ): if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) if text_cfg.hf_model_name: text = HFTextEncoder( text_cfg.hf_model_name, output_dim=embed_dim, proj=text_cfg.proj, pooler_type=text_cfg.pooler_type, pretrained=text_cfg.hf_model_pretrained, output_tokens=text_cfg.output_tokens, ) else: act_layer = QuickGELU if quick_gelu else nn.GELU norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
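The QuickGELU comment in the record above deserves a concrete check. A minimal sketch, assuming PyTorch is available: QuickGELU is the sigmoid approximation x * sigmoid(1.702 * x) that open_clip defines for OpenAI-pretrained checkpoints, and it is close to, but not identical with, the exact erf-based nn.GELU:

import torch
import torch.nn as nn

class QuickGELU(nn.Module):
    # Sigmoid-based GELU approximation used by OpenAI CLIP checkpoints.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x * torch.sigmoid(1.702 * x)

x = torch.linspace(-3, 3, 101)
gap = (QuickGELU()(x) - nn.GELU()(x)).abs().max().item()
print(f"max |QuickGELU - GELU| on [-3, 3]: {gap:.4f}")  # small but nonzero

Because the two curves differ slightly, quick_gelu has to match how the checkpoint was trained; swapping activations under pretrained weights shifts the outputs.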
text = TextTransformer(
8
2023-12-09 05:43:08+00:00
24k
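As a small usage sketch of the precision plumbing in the record above (again assuming PyTorch): get_cast_dtype maps a precision string to an optional dtype, and the tower builders pick the float32-accumulating LayerNormFp32 exactly when that dtype is half precision:

import torch

def get_cast_dtype(precision: str):
    # Mirrors the helper above: only 'bf16'/'fp16' yield a cast dtype.
    return {"bf16": torch.bfloat16, "fp16": torch.float16}.get(precision)

for precision in ("fp32", "fp16", "bf16"):
    cast_dtype = get_cast_dtype(precision)
    fp32_norm = cast_dtype in (torch.float16, torch.bfloat16)
    print(precision, cast_dtype, "LayerNormFp32" if fp32_norm else "LayerNorm")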
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Protocol\"] = self.readInt()\n fields[\"KeyVersion\"] = self.readInt()\n fields[\"MajorVersion\"] = self.readInt()\n fields[\"MinorVersion\"] = self.readInt()\n fields[\"Build\"] = self.readInt()\n fields[\"ContentHash\"] = self.readString()\n fields[\"DeviceType\"] = self.readInt()\n fields[\"AppStore\"] = self.readInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20100, fields, cryptoInit)\n\n def getMessageType(self):\n return 10100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginMessage", "path": "Heart/Packets/Client/Authentification/LoginMessage.py", "snippet": "class LoginMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"ClientMajor\"] = self.readInt()\n fields[\"ClientMinor\"] = self.readInt()\n fields[\"ClientBuild\"] = self.readInt()\n fields[\"ResourceSha\"] = self.readString()\n fields[\"Device\"] = self.readString()\n fields[\"PreferredLanguage\"] = self.readDataReference()\n fields[\"PreferredDeviceLanguage\"] = self.readString()\n fields[\"OSVersion\"] = self.readString()\n fields[\"isAndroid\"] = self.readBoolean()\n fields[\"IMEI\"] = self.readString()\n fields[\"AndroidID\"] = self.readString()\n fields[\"isAdvertisingEnabled\"] = self.readBoolean()\n fields[\"AppleIFV\"] = self.readString()\n fields[\"RndKey\"] = self.readInt()\n fields[\"AppStore\"] = self.readVInt()\n fields[\"ClientVersion\"] = self.readString()\n fields[\"TencentOpenId\"] = self.readString()\n fields[\"TencentToken\"] = self.readString()\n fields[\"TencentPlatform\"] = self.readVInt()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n fields[\"AppLicensingSignature\"] = self.readString()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n if fields[\"ClientMajor\"]==53:\n calling_instance.player.ClientVersion = f'{str(fields[\"ClientMajor\"])}.{str(fields[\"ClientBuild\"])}.{str(fields[\"ClientMinor\"])}'\n fields[\"Socket\"] = calling_instance.client\n db_instance = DatabaseHandler()\n if db_instance.playerExist(fields[\"PassToken\"], fields[\"AccountID\"]):\n player_data = json.loads(db_instance.getPlayerEntry(fields[\"AccountID\"])[2])\n db_instance.loadAccount(calling_instance.player, fields[\"AccountID\"])\n else:\n db_instance.createAccount(calling_instance.player.getDataTemplate(fields[\"AccountID\"][0], fields[\"AccountID\"][1], fields[\"PassToken\"]))\n ClientsManager.AddPlayer(calling_instance.player.ID, calling_instance.client)\n Messaging.sendMessage(20104, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24399, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10101\n\n def getMessageVersion(self):\n 
return self.messageVersion" }, { "identifier": "AskForBattleEndMessage", "path": "Heart/Packets/Client/Battle/AskForBattleEndMessage.py", "snippet": "class AskForBattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Unk1\"] = self.readVInt()\n fields[\"Result\"] = self.readVInt()\n fields[\"Rank\"] = self.readVInt()\n fields[\"MapID\"] = self.readDataReference()\n fields[\"HeroesCount\"] = self.readVInt()\n fields[\"Heroes\"] = []\n for i in range(fields[\"HeroesCount\"]): fields[\"Heroes\"].append({\"Brawler\": {\"ID\": self.readDataReference(), \"SkinID\": self.readDataReference()}, \"Team\": self.readVInt(), \"IsPlayer\": self.readBoolean(), \"PlayerName\": self.readString()})\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(23456, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14110\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ChangeAvatarNameMessage", "path": "Heart/Packets/Client/Home/ChangeAvatarNameMessage.py", "snippet": "class ChangeAvatarNameMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeString(fields[\"Name\"])\n self.writeBoolean(fields[\"NameSetByUser\"])\n\n def decode(self):\n fields = {}\n fields[\"Name\"] = self.readString()\n fields[\"NameSetByUser\"] = self.readBoolean()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n db_instance = DatabaseHandler()\n playerData = db_instance.getPlayer(calling_instance.player.ID)\n playerData[\"Name\"] = fields[\"Name\"]\n playerData[\"Registered\"] = True\n db_instance.updatePlayerData(playerData, calling_instance)\n fields[\"Socket\"] = calling_instance.client\n fields[\"Command\"] = {\"ID\": 201}\n Messaging.sendMessage(24111, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10212\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "EndClientTurnMessage", "path": "Heart/Packets/Client/Home/EndClientTurnMessage.py", "snippet": "class EndClientTurnMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n fields[\"Tick\"] = self.readVInt()\n fields[\"Checksum\"] = self.readVInt()\n fields[\"CommandsCount\"] = self.readVInt()\n super().decode(fields)\n fields[\"Commands\"] = []\n for i in range(fields[\"CommandsCount\"]):\n fields[\"Commands\"].append({\"ID\": self.readVInt()})\n if LogicCommandManager.commandExist(fields[\"Commands\"][i][\"ID\"]):\n command = LogicCommandManager.createCommand(fields[\"Commands\"][i][\"ID\"])\n print(\"Command\", LogicCommandManager.getCommandsName(fields[\"Commands\"][i][\"ID\"]))\n if command is not None:\n fields[\"Commands\"][i][\"Fields\"] = command.decode(self)\n fields[\"Commands\"][i][\"Instance\"] = command\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n for command in fields[\"Commands\"]:\n if \"Instance\" not in command.keys():\n return\n\n if hasattr(command[\"Instance\"], 
'execute'):\n command[\"Instance\"].execute(calling_instance, command[\"Fields\"], cryptoInit)\n if command[\"ID\"] == 519:\n Messaging.sendMessage(24104, {\"Socket\": calling_instance.client, \"ServerChecksum\": 0, \"ClientChecksum\": 0, \"Tick\": 0}, cryptoInit)\n\n def getMessageType(self):\n return 14102\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeFromOfflinePractiseMessage", "path": "Heart/Packets/Client/Home/GoHomeFromOfflinePractiseMessage.py", "snippet": "class GoHomeFromOfflinePractiseMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14109\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeMessage", "path": "Heart/Packets/Client/Home/GoHomeMessage.py", "snippet": "class GoHomeMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 17750\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GetPlayerProfileMessage", "path": "Heart/Packets/Client/Home/GetPlayerProfileMessage.py", "snippet": "class GetPlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"BattleInfoBoolean\"] = self.readBoolean()\n if fields[\"BattleInfoBoolean\"]:\n fields[\"unk1\"] = self.readVInt()\n fields[\"AnotherID\"] = self.readLong()\n fields[\"unk2\"] = self.readVInt()\n for i in self.readVInt():\n fields[\"CsvID\"] = self.readDataReference()\n fields[\"unk3\"] = self.readVInt()\n fields[\"unk4\"] = self.readVInt()\n fields[\"unk5\"] = self.readVInt()\n fields[\"unk6\"] = self.readVInt()\n fields[\"PlayerName\"] = self.readString()\n fields[\"unk7\"] = self.readVInt()\n fields[\"Thumbnail\"] = self.readVInt()\n fields[\"NameColor\"] = self.readVInt()\n fields[\"unk10\"] = self.readVInt()\n fields[\"unk11\"] = self.readVInt()\n fields[\"PlayerHighID\"] = self.readInt()\n fields[\"PlayerLowID\"] = self.readInt()\n super().decode(fields)\n\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24113, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 15081\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AskForAllianceDataMessage", "path": "Heart/Packets/Client/Home/AskForAllianceDataMessage.py", "snippet": "class AskForAllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"id\"] = self.readVLong()\n fields[\"isInAlliance\"] = 
self.readBoolean()\n if fields[\"isInAlliance\"] == True:\n fields[\"anotherIDHigh\"] = self.readVInt()\n fields[\"anotherIDLow\"] = self.readVInt()\n super().decode(fields)\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24301, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14302\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveMessage", "path": "Heart/Packets/Client/Socket/KeepAliveMessage.py", "snippet": "class KeepAliveMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20108, fields, cryptoInit)\n\n def getMessageType(self):\n return 10108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginFailedMessage", "path": "Heart/Packets/Server/Authentification/LoginFailedMessage.py", "snippet": "class LoginFailedMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeInt(fields['ErrorID'])\n self.writeString(fields['FingerprintData'])\n self.writeString()\n self.writeString(fields['ContentURL'])\n self.writeString()\n self.writeString(fields['Message'])\n self.writeInt(0)\n self.writeBoolean(False)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeString()\n self.writeVInt(0)\n self.writeString()\n self.writeBoolean(False)\n\n def decode(self):\n fields = {}\n fields[\"ErrorCode\"] = self.readInt()\n fields[\"ResourceFingerprintData\"] = self.readString()\n fields[\"RedirectDomain\"] = self.readString()\n fields[\"ContentURL\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"Reason\"] = self.readString()\n fields[\"SecondsUntilMaintenanceEnd\"] = self.readInt()\n fields[\"ShowContactSupportForBan\"] = self.readBoolean()\n fields[\"CompressedFingerprintData\"] = self.readBytesWithoutLength()\n fields[\"ContentURLListCount\"] = self.readInt()\n fields[\"ContentURLList\"] = []\n for i in range(fields[\"ContentURLListCount\"]):\n fields[\"ContentURLList\"].append(self.readString())\n fields[\"KunlunAppStore\"] = self.readInt()\n fields[\"MaintenanceType\"] = self.readInt()\n fields[\"HelpshiftFaqId\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"Unk1\"] = self.readBoolean()\n fields[\"Unk2\"] = self.readBoolean()\n fields[\"Unk3\"] = self.readString()\n fields[\"Unk4\"] = self.readVInt()\n fields[\"Unk5\"] = self.readString()\n fields[\"OptionalTargetedAccountIdState\"] = self.readBoolean()\n if fields[\"OptionalTargetedAccountIdState\"] == True:\n fields[\"OptionalTargetedAccountId\"] = self.readLong()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20103\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginOkMessage", "path": "Heart/Packets/Server/Authentification/LoginOkMessage.py", "snippet": "class LoginOkMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n 
self.messageVersion = 1\n\n def encode(self, fields, player):\n self.writeLong(player.ID[0], player.ID[1])\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(player.Token)\n self.writeString()\n self.writeString()\n self.writeInt(53)\n self.writeInt(176)\n self.writeInt(1)\n self.writeString(\"dev\")\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeString(\"RU\")\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeInt(2)\n self.writeString('https://game-assets.brawlstarsgame.com')\n self.writeString('http://a678dbc1c015a893c9fd-4e8cc3b1ad3a3c940c504815caefa967.r87.cf2.rackcdn.com')\n self.writeInt(2)\n self.writeString('https://event-assets.brawlstars.com')\n self.writeString('https://24b999e6da07674e22b0-8209975788a0f2469e68e84405ae4fcf.ssl.cf2.rackcdn.com/event-assets')\n self.writeVInt(0)\n self.writeCompressedString(b'')\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeString('https://play.google.com/store/apps/details?id=com.supercell.brawlstars')\n self.writeString()\n self.writeBoolean(False)\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"HomeID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"FacebookID\"] = self.readString()\n fields[\"GamecenterID\"] = self.readString()\n fields[\"ServerMajorVersion\"] = self.readInt()\n fields[\"ContentVersion\"] = self.readInt()\n fields[\"ServerBuild\"] = self.readInt()\n fields[\"ServerEnvironment\"] = self.readString()\n fields[\"SessionCount\"] = self.readInt()\n fields[\"PlayTimeSeconds\"] = self.readInt()\n fields[\"DaysSinceStartedPlaying\"] = self.readInt()\n fields[\"FacebookAppID\"] = self.readString()\n fields[\"ServerTime\"] = self.readString()\n fields[\"AccountCreatedDate\"] = self.readString()\n fields[\"StartupCooldownSeconds\"] = self.readInt()\n fields[\"GoogleServiceID\"] = self.readString()\n fields[\"LoginCountry\"] = self.readString()\n fields[\"KunlunID\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"TencentID\"] = self.readString()\n\n ContentUrlCount = self.readInt()\n fields[\"GameAssetsUrls\"] = []\n for i in range(ContentUrlCount):\n fields[\"GameAssetsUrls\"].append(self.readString())\n\n EventUrlCount = self.readInt()\n fields[\"EventAssetsUrls\"] = []\n for i in range(EventUrlCount):\n fields[\"EventAssetsUrls\"].append(self.readString())\n\n fields[\"SecondsUntilAccountDeletion\"] = self.readVInt()\n fields[\"SupercellIDToken\"] = self.readCompressedString()\n fields[\"IsSupercellIDLogoutAllDevicesAllowed\"] = self.readBoolean()\n fields[\"isSupercellIDEligible\"] = self.readBoolean()\n fields[\"LineID\"] = self.readString()\n fields[\"SessionID\"] = self.readString()\n fields[\"KakaoID\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"YoozooPayNotifyUrl\"] = self.readString()\n fields[\"UnbotifyEnabled\"] = self.readBoolean()\n\n Unknown1 = self.readBoolean()\n fields[\"Unknown1\"] = Unknown1\n if Unknown1:\n fields[\"Unknown2\"] = self.readString()\n\n Unknown3 = self.readBoolean()\n fields[\"Unknown3\"] = Unknown1\n 
if Unknown3:\n fields[\"Unknown4\"] = self.readString()\n\n Unknown5 = self.readBoolean()\n fields[\"Unknown5\"] = Unknown1\n if Unknown5:\n fields[\"Unknown6\"] = self.readString()\n\n Unknown7 = self.readBoolean()\n fields[\"Unknown7\"] = Unknown1\n if Unknown7:\n fields[\"Unknown8\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OutOfSyncMessage", "path": "Heart/Packets/Server/Authentification/OutOfSyncMessage.py", "snippet": "class OutOfSyncMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeVInt(fields[\"ServerChecksum\"])\n self.writeVInt(fields[\"ClientChecksum\"])\n self.writeVInt(fields[\"Tick\"])\n\n def decode(self):\n fields = {}\n fields[\"ServerChecksum\"] = self.readVInt()\n fields[\"ClientChecksum\"] = self.readVInt()\n fields[\"Tick\"] = self.readVInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ServerHelloMessage", "path": "Heart/Packets/Server/Authentification/ServerHelloMessage.py", "snippet": "class ServerHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeBytes(urandom(24), 24)\n\n def decode(self):\n fields = {}\n fields[\"Random\"] = self.readBytesWithoutLength()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "BattleEndMessage", "path": "Heart/Packets/Server/Battle/BattleEndMessage.py", "snippet": "class BattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeLong(0, 0) # Battle UUID High\n self.writeLong(0, 0) # Battle UUID Low\n self.writeVInt(2) # Battle End Game Mode (gametype)\n self.writeVInt(fields[\"Rank\"]) # Result (Victory/Defeat/Draw/Rank Score)\n self.writeVInt(0) # Tokens Gained (Gained Keys)\n self.writeVInt(0) # Trophies Result (Metascore change)\n self.writeVInt(0) # Power Play Points Gained (Pro League Points)\n self.writeVInt(0) # Doubled Tokens (Double Keys)\n self.writeVInt(0) # Double Token Event (Double Event Keys)\n self.writeVInt(0) # Token Doubler Remaining (Double Keys Remaining)\n self.writeVInt(0) # game Lenght In Seconds\n self.writeVInt(0) # Epic Win Power Play Points Gained (op Win Points)\n self.writeVInt(0) # Championship Level Reached (CC Wins)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n\n self.writeVInt(fields[\"HeroesCount\"])\n for heroEntry in fields[\"Heroes\"]:\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n 
self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeByte(1)\n for i in range(1):\n self.writeDataReference(heroEntry[\"Brawler\"][\"ID\"][0], heroEntry[\"Brawler\"][\"ID\"][1])\n self.writeByte(1)\n for i in range(1):\n if (heroEntry[\"Brawler\"][\"SkinID\"] is None):\n self.writeVInt(0)\n else:\n self.writeDataReference(heroEntry[\"Brawler\"][\"SkinID\"][0], heroEntry[\"Brawler\"][\"SkinID\"][1])\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(1250)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(11)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n if heroEntry[\"IsPlayer\"]:\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(heroEntry[\"PlayerName\"])\n self.writeVInt(100)\n self.writeVInt(28000000)\n self.writeVInt(43000000)\n self.writeVInt(-2)\n if heroEntry[\"IsPlayer\"]:\n self.writeBoolean(True)\n self.writeVLong(5, 4181497)\n self.writeString('haccer club')\n self.writeDataReference(8, 16)\n else:\n self.writeBoolean(False)\n\n self.writeInt8(1)\n self.writeVInt(5978)\n self.writeInt8(1)\n self.writeVInt(0)\n\n self.writeInt16(5)\n self.writeInt16(3)\n self.writeInt(27328)\n self.writeInt(25659)\n\n self.writeDataReference(0)\n\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n\n def decode(self):\n fields = {}\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23456\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AvailableServerCommandMessage", "path": "Heart/Packets/Server/Home/AvailableServerCommandMessage.py", "snippet": "class AvailableServerCommandMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(fields[\"Command\"][\"ID\"])\n command = LogicCommandManager.createCommand(fields[\"Command\"][\"ID\"], self.messagePayload)\n self.messagePayload = command.encode(fields)\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24111\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LobbyInfoMessage", "path": "Heart/Packets/Server/Home/LobbyInfoMessage.py", "snippet": "class LobbyInfoMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(ClientsManager.GetCount())\n self.writeString(f\"\"\"Version: 
{player.ClientVersion}\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\"\")\n self.writeVInt(0) # count event\n self.writeVInt(0) # new timer in v51\n\n def decode(self):\n fields = {}\n fields[\"PlayerCount\"] = self.readVInt()\n fields[\"Text\"] = self.readString()\n fields[\"Unk1\"] = self.readVInt()\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23457\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OwnHomeDataMessage", "path": "Heart/Packets/Server/Home/OwnHomeDataMessage.py", "snippet": "class OwnHomeDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1688816070)\n self.writeVInt(1191532375)\n self.writeVInt(2023189)\n self.writeVInt(73530)\n\n self.writeVInt(player.Trophies)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(player.HighestTrophies) \n self.writeVInt(player.TrophyRoadTier)\n self.writeVInt(player.Experience)\n self.writeDataReference(28, player.Thumbnail)\n self.writeDataReference(43, player.Namecolor)\n\n self.writeVInt(26)\n for x in range(26):\n self.writeVInt(x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n \n self.writeVInt(len(player.OwnedSkins))\n for x in player.OwnedSkins:\n self.writeDataReference(29, x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(0)\n self.writeVInt(2)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(115)\n self.writeVInt(335442)\n self.writeVInt(1001442)\n self.writeVInt(5778642) \n\n self.writeVInt(120)\n self.writeVInt(200)\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(1) # Shop Offers\n\n self.writeVInt(1) # RewardCount\n\n self.writeVInt(38) # ItemType\n self.writeVInt(1337) # Amount\n self.writeDataReference(0) # CsvID\n self.writeVInt(0) # SkinID\n\n self.writeVInt(0) # Currency(0-Gems, 1-Gold, 3-StarpoInts)\n self.writeVInt(0) # Cost\n self.writeVInt(0) # Time\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # Daily Offer\n self.writeVInt(0) # Old price\n self.writeString('Offer') # Text\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString(\"offer_bgr_xmas23\") # Background\n self.writeVInt(0)\n self.writeBoolean(False) # This purchase is already being processed\n self.writeVInt(0) # Type Benefit\n self.writeVInt(0) # Benefit\n self.writeString()\n self.writeBoolean(False) # One time offer\n self.writeBoolean(False) # Claimed\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n \n self.writeVInt(20)\n 
self.writeVInt(1428)\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n self.writeVInt(30)\n\n self.writeByte(1) # count brawlers selected\n self.writeDataReference(16, player.SelectedBrawlers[0]) # selected brawler\n self.writeString(player.Region) # location\n self.writeString(player.ContentCreator) # supported creator\n\n self.writeVInt(6) \n self.writeVInt(1) \n self.writeVInt(9) \n self.writeVInt(1) \n self.writeVInt(22) \n self.writeVInt(3) \n self.writeVInt(25) \n self.writeVInt(1) \n self.writeVInt(24) \n self.writeVInt(0)\n self.writeVInt(15)\n self.writeVInt(32447)\n self.writeVInt(28)\n\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n for season in range(1):\n self.writeVInt(22-1)\n self.writeVInt(40000)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(0) \n\n self.writeBoolean(True) # Vanity items\n self.writeVInt(len(player.OwnedThumbnails)+len(player.OwnedPins))\n for x in player.OwnedThumbnails:\n self.writeVInt(28)\n self.writeVInt(x)\n self.writeVInt(0)\n for x in player.OwnedPins:\n self.writeVInt(52)\n self.writeVInt(x)\n self.writeVInt(0)\n\n\n self.writeBoolean(False) # Power league season data\n\n self.writeInt(0)\n self.writeVInt(0)\n self.writeVInt(16)\n self.writeVInt(76)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2023189)\n\n self.writeVInt(35) # event slot id\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(3)\n self.writeVInt(4)\n self.writeVInt(5)\n self.writeVInt(6)\n self.writeVInt(7)\n self.writeVInt(8)\n self.writeVInt(9)\n self.writeVInt(10)\n self.writeVInt(11)\n self.writeVInt(12)\n self.writeVInt(13) \n self.writeVInt(14)\n self.writeVInt(15)\n self.writeVInt(16)\n self.writeVInt(17)\n self.writeVInt(18) \n self.writeVInt(19)\n self.writeVInt(20)\n self.writeVInt(21) \n self.writeVInt(22)\n self.writeVInt(23)\n self.writeVInt(24)\n self.writeVInt(25)\n self.writeVInt(26)\n self.writeVInt(27)\n self.writeVInt(28)\n self.writeVInt(29)\n self.writeVInt(30)\n self.writeVInt(31)\n self.writeVInt(32)\n self.writeVInt(33)\n self.writeVInt(34)\n self.writeVInt(35)\n\n self.writeVInt(1)\n\n self.writeVInt(4)\n self.writeVInt(7)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(72292)\n self.writeVInt(10) \n self.writeDataReference(15, 21) # map id\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeString(\"\")\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # MapMaker map structure array\n self.writeVInt(0)\n self.writeBoolean(False) # Power League array entry\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeVInt(0) \n self.writeVInt(0) \n self.writeVInt(0) \n self.writeBoolean(False) \n\n self.writeVInt(0)\n \n ByteStreamHelper.encodeIntList(self, [20, 35, 75, 140, 290, 480, 800, 1250, 1875, 2800])\n ByteStreamHelper.encodeIntList(self, [30, 80, 170, 360]) # Shop 
Coins Price\n ByteStreamHelper.encodeIntList(self, [300, 880, 2040, 4680]) # Shop Coins Amount\n\n self.writeVInt(0) \n\n self.writeVInt(1)\n self.writeVInt(41000086) # theme\n self.writeVInt(1)\n\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(4)\n\n ByteStreamHelper.encodeIntList(self, [0, 29, 79, 169, 349, 699])\n ByteStreamHelper.encodeIntList(self, [0, 160, 450, 500, 1250, 2500])\n\n self.writeLong(0, 1) # Player ID\n\n self.writeVInt(0) # Notification factory\n \n self.writeVInt(1)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeBoolean(False) # Login Calendar\n self.writeVInt(0)\n self.writeBoolean(True) # Starr Road\n for i in range(7):\n self.writeVInt(0)\n\n self.writeVInt(0) # Mastery\n\n #BattleCard\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n\n self.writeVInt(0) #Brawler's BattleCards\n\n self.writeVInt(5)\n for i in range(5):\n self.writeDataReference(80, i)\n self.writeVInt(-1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(86400*24)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(False)\n\n # end LogicClientHome\n\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeStringReference(player.Name)\n self.writeBoolean(player.Registered)\n self.writeInt(-1)\n\n self.writeVInt(17)\n unlocked_brawler = [i['CardID'] for x,i in player.OwnedBrawlers.items()]\n self.writeVInt(len(unlocked_brawler) + 2)\n for x in unlocked_brawler:\n self.writeDataReference(23, x)\n self.writeVInt(-1)\n self.writeVInt(1)\n\n self.writeDataReference(5, 8)\n self.writeVInt(-1)\n self.writeVInt(player.Coins)\n\n self.writeDataReference(5, 23)\n self.writeVInt(-1)\n self.writeVInt(player.Blings)\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"Trophies\"])\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroHighScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"HighestTrophies\"])\n\n self.writeVInt(0) # Array\n\n self.writeVInt(0) # HeroPower\n \n self.writeVInt(len(player.OwnedBrawlers)) # HeroLevel\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"PowerLevel\"]-1)\n\n self.writeVInt(0) # hero star power gadget and hypercharge\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroSeenState\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(2)\n\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n\n self.writeVInt(player.Gems) # Diamonds\n self.writeVInt(player.Gems) # Free Diamonds\n self.writeVInt(10) # Player Level\n self.writeVInt(100)\n self.writeVInt(0) # 
CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1\n self.writeVInt(100) # Battle Count\n self.writeVInt(10) # WinCount\n self.writeVInt(80) # LoseCount\n self.writeVInt(50) # WinLooseStreak\n self.writeVInt(20) # NpcWinCount\n self.writeVInt(0) # NpcLoseCount\n self.writeVInt(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0\n self.writeVInt(12)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString()\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(1)\n\n def decode(self):\n fields = {}\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24101\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveServerMessage", "path": "Heart/Packets/Server/Socket/KeepAliveServerMessage.py", "snippet": "class KeepAliveServerMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "PlayerProfileMessage", "path": "Heart/Packets/Server/Home/PlayerProfileMessage.py", "snippet": "class PlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVLong(fields[\"PlayerHighID\"], fields[\"PlayerLowID\"])\n self.writeDataReference(16,11) # \n self.writeVInt(70)\n for i in range(70):\n self.writeDataReference(16, i)\n self.writeDataReference(0)\n self.writeVInt(500) # trophies\n self.writeVInt(1250) # highestTrophies\n self.writeVInt(11) #power level\n \n self.writeVInt(18)\n\n self.writeVInt(1) \n self.writeVInt(1) # 3v3 victories\n\n self.writeVInt(2)\n self.writeVInt(528859) # total exp\n\n self.writeVInt(3)\n self.writeVInt(3) # current trophies\n\n self.writeVInt(4)\n self.writeVInt(4) # highest trophies\n\n self.writeVInt(5) \n self.writeVInt(5) # unlocked brawler?\n\n self.writeVInt(8)\n self.writeVInt(6) # solo victories\n\n self.writeVInt(11) \n self.writeVInt(7) # duo victories\n\n self.writeVInt(9) \n self.writeVInt(8) # highest level robo rumble\n\n self.writeVInt(12) \n self.writeVInt(9) # highest level boss fight\n\n self.writeVInt(13)\n self.writeVInt(10) # highest power league points\n\n self.writeVInt(14)\n self.writeVInt(11) # some power league stuff\n\n self.writeVInt(15)\n self.writeVInt(12) # most challenge win\n\n self.writeVInt(16) #highest level city rampage\n self.writeVInt(13)\n\n self.writeVInt(18) #highest solo power league rank\n self.writeVInt(14)\n\n self.writeVInt(17) #highest team power league rank\n self.writeVInt(15)\n\n self.writeVInt(19) # highest Club league rank\n self.writeVInt(16)\n\n self.writeVInt(20) # number fame\n self.writeVInt(1000)\n\n self.writeVInt(21)\n self.writeVInt(502052) #v50\n\n self.writeString(player.Name) #PlayerInfo\n self.writeVInt(100)\n self.writeVInt(28000000 + player.Thumbnail)\n self.writeVInt(43000000 + player.Namecolor)\n self.writeVInt(14)\n\n self.writeBoolean(True)\n self.writeVInt(300)\n\n self.writeString(\"hello world\")\n self.writeVInt(100)\n self.writeVInt(200)\n self.writeDataReference(29, 558)\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeDataReference(0)\n 
self.writeDataReference(0)\n\n self.writeBoolean(True) #alliance\n self.writeLong(0,1) #alliance ID\n self.writeString(\"haccers\") #alliance name\n self.writeDataReference(8,1) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(10000) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeDataReference(0)\n self.writeString(\"RU\") #location\n self.writeVInt(4) # unknown\n self.writeBoolean(True) #is Family friendly\n self.writeVInt(0)\n \n\n self.writeDataReference(25, 1) #alliance role\n self.writeVInt(16)\n\n def decode(self):\n pass\n # fields = {}\n # fields[\"PlayerCount\"] = self.readVInt()\n # fields[\"Text\"] = self.readString()\n # fields[\"Unk1\"] = self.readVInt()\n # super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24113\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "MyAllianceMessage", "path": "Heart/Packets/Server/Home/MyAllianceMessage.py", "snippet": "class MyAllianceMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1) # Online people in alliance\n self.writeBoolean(True) # isInAlliance\n self.writeDataReference(25, 4)\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(3) # type\n self.writeVInt(1) # member count\n self.writeVInt(9500) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(3) # unknown\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24399\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AllianceDataMessage", "path": "Heart/Packets/Server/Home/AllianceDataMessage.py", "snippet": "class AllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeBoolean(True)\n\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(player.Trophies) # total trophies\n self.writeVInt(0) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(1) # people online\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n self.writeString(\"this is the hacciest club in the world\")\n\n self.writeVInt(1) # member count\n self.writeLong(player.ID[0], player.ID[1]) # player ID\n self.writeVInt(2) # role\n self.writeVInt(player.Trophies) # trophies\n self.writeVInt(0) # status: 0=offline 2=online\n self.writeVInt(1) # last connected time seconds ?\n highestPowerLeagueRank = 2\n self.writeVInt(highestPowerLeagueRank)\n if highestPowerLeagueRank != 0:\n self.writeVInt(2) #solo\n self.writeVInt(1) #duo\n self.writeBoolean(False) # boolean always false?\n\n self.writeString(player.Name) # player name\n self.writeVInt(100) # VInt always 100\n self.writeVInt(28000000 + player.Thumbnail) # thumbnail\n self.writeVInt(43000000 + 
player.Namecolor) # name color\n self.writeVInt(46000000 + player.Namecolor)\n\n self.writeVInt(-1) # most people have it -1 but some with something\n self.writeBoolean(False) # whats this ? only 2/30 people have it true in my club\n week = 58 # week 58 of club league as of 2023/07/05, this number is 0 if you just arrived in the club\n self.writeVInt(week)\n if week != 0: # club league week number?\n self.writeVInt(3) # day\n self.writeVInt(18) # total club trophies earned\n self.writeVInt(0) # event day club trophies earned\n self.writeVInt(8) # total tickets used\n self.writeVInt(0) # event day tickets used\n self.writeVInt(6) # event day max tickets\n self.writeVInt(6) # event day tickets left\n self.writeVInt(0) # event day player ranking\n self.writeBoolean(True) # everyone have it to true\n self.writeVInt(200) # player experience lvl but why tf it doesn't show for some people\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24301\n\n def getMessageVersion(self):\n return self.messageVersion" } ]
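Every packet class in this context is built from readVInt/writeVInt-style helpers inherited from PiranhaMessage. As a rough illustration only, here is a generic little-endian base-128 varint for non-negative integers; the game's actual VInt encoding is its own variant and may differ from this on the wire:

def write_varint(value: int) -> bytes:
    # 7 payload bits per byte; the high bit means "more bytes follow".
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def read_varint(data: bytes) -> int:
    result = shift = 0
    for b in data:
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            break
        shift += 7
    return result

assert write_varint(300) == b"\xac\x02"
assert read_varint(write_varint(300)) == 300

The payoff of the scheme is visible throughout the snippets: small counts and IDs, which dominate these packets, cost a single byte each.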
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
16026
12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: 
GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage,
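The messagesList mapping in the code above mixes two kinds of values: a class object when the packet is implemented, and a bare string holding only the packet's name when it is merely known by ID. A hypothetical dispatch helper (illustrative names, not the repo's actual factory API) branches on exactly that:

def create_message(messages_list: dict, msg_id: int, payload: bytes):
    entry = messages_list.get(msg_id)
    if entry is None:
        raise KeyError(f"unknown message id {msg_id}")
    if isinstance(entry, str):
        # Known but unimplemented packet: only its name is recorded.
        print(f"{msg_id} ({entry}) is not handled yet")
        return None
    return entry(payload)  # implemented packet class, e.g. LoginMessage

Keeping unimplemented IDs as strings makes the table self-documenting while letting a simple isinstance(entry, str) check stand in for "not yet supported".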
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage', 10212: ChangeAvatarNameMessage, 10309: 'GetAllianceInviteTokenMessage', 10321: 'AttributionEventMessage', 10401: 'CreateGameMessage', 10501: 'AcceptFriendMessage', 10502: 'AddFriendMessage', 10503: 'AskForAddableFriendsMessage', 10504: 'AskForFriendListMessage', 10506: 'RemoveFriendMessage', 10507: 'AddFriendByEmailMessage', 10509: 'AddFriendByAvatarNameAndCodeMessage', 10512: 'AskForPlayingGamecenterFriendsMessage', 10513: 'AskForPlayingFacebookFriendsMessage', 10514: 'AskForPlayingKakaoFriendsMessage', 10515: 'AskForPlayingTencentFriendsMessage', 10516: 'AskForPlayingLineFriendsMessage', 10517: 'AskForPlayingSupercellFriendsMessage', 10523: 'YoozooBillingRequestMessage', 10555: 'ClientInputMessage', 10576: 'SetBlockFriendRequestsMessage', 10599: 'AskForFriendSuggestionsMessage', 10636: 'SCIDBindAccountMessage', 11736: 'SCIDLogoutAllDevicesMessage', 12100: 'CreatePlayerMapMessage', 12101: 'DeletePlayerMapMessage', 12102: 'GetPlayerMapsMessage', 12103: 'UpdatePlayerMapMessage', 12104: 'SubmitPlayerMapMessage', 12105: 'PublishPlayerMapMessage', 12106: 'ChangePlayerMapNameMessage', 12107: 'EnterMapEditorMessage', 12108: 'GoHomeFromMapEditorMessage', 12110: 'TeamSetPlayerMapMessage', 12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 
'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage,
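Elsewhere in these packets the same CSV reference shows up in two forms: writeDataReference(28, player.Thumbnail) and writeVInt(28000000 + player.Thumbnail). My reading of those constants (an assumption drawn from the snippets, not code from the repo) is that the combined form packs class and instance as class * 1,000,000 + instance:

def to_global_id(class_id: int, instance_id: int) -> int:
    # e.g. class 28, instance 5 -> 28000005
    return class_id * 1_000_000 + instance_id

def from_global_id(global_id: int) -> tuple[int, int]:
    return divmod(global_id, 1_000_000)

assert to_global_id(28, 5) == 28_000_005
assert from_global_id(43_000_012) == (43, 12)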
20104: LoginOkMessage,
11
2023-12-14 18:57:56+00:00
24k
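The cropped_code/next_line pair in the record above revolves around LogicLaserMessageFactory.messagesList, which maps packet IDs to concrete message classes and keeps plain strings as placeholders for packets whose handlers are not implemented yet. A minimal, self-contained sketch of how such a factory is typically queried when a packet arrives; the createMessageByType name, the stub classes, and the placeholder handling are illustrative assumptions, not code taken from the record:

# Stub message classes standing in for the real packet definitions (assumptions).
class ClientHelloMessage: pass
class LoginMessage: pass

class LogicLaserMessageFactory:
    # id -> class when the handler exists, id -> str placeholder otherwise
    messagesList = {
        10100: ClientHelloMessage,
        10101: LoginMessage,
        10102: 'LoginUsingSessionMessage',
    }

    @classmethod
    def createMessageByType(cls, message_type):
        entry = cls.messagesList.get(message_type)
        if entry is None or isinstance(entry, str):
            # Unknown or not-yet-implemented packet id: nothing to instantiate.
            return None
        return entry()  # instantiate the concrete message class

print(LogicLaserMessageFactory.createMessageByType(10100))  # ClientHelloMessage instance
print(LogicLaserMessageFactory.createMessageByType(10102))  # None (string placeholder)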
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = 
os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\n \"\"\" GOT-10k dataset.\n\n Publication:\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\n arXiv:1810.11981, 2018\n https://arxiv.org/pdf/1810.11981.pdf\n\n Download dataset from http://got-10k.aitestunion.com/downloads\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\n \"\"\"\n root = env_settings().got10k_dir if root is None else root\n super().__init__('GOT10k', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n return sequence_meta_info\n\n def _read_meta(self, seq_path):\n try:\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n meta_info = f.readlines()\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\n 'major_class': meta_info[7].split(': ')[-1][:-1],\n 'root_class': meta_info[8].split(': ')[-1][:-1],\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n with open(os.path.join(self.root, 'list.txt')) as f:\n dir_list = list(csv.reader(f))\n dir_list = [dir_name[0] for dir_name in dir_list]\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full 
occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n with open(cover_file, 'r', newline='') as f:\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(self.root, self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__('TrackingNet', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class ImagenetVID(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. 
Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid\", root, image_loader)\n\n cache_file = os.path.join(root, 'cache.json')\n if os.path.isfile(cache_file):\n # If available, load the pre-processed cache file containing meta-info for each sequence\n with open(cache_file, 'r') as f:\n sequence_list_dict = json.load(f)\n\n self.sequence_list = sequence_list_dict\n else:\n # Else process the imagenet annotations and generate the cache file\n self.sequence_list = self._process_anno(root)\n\n with open(cache_file, 'w') as f:\n json.dump(self.sequence_list, f)\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n\n def _process_anno(self, root):\n # Builds individual tracklets\n base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n all_sequences = []\n for set in sorted(os.listdir(base_vid_anno_path)):\n set_id = int(set.split('_')[-1])\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n vid_id = int(vid.split('_')[-1])\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n image_size = [int(frame1_anno.find('size/width').text), 
int(frame1_anno.find('size/height').text)]\n\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n for f in anno_files]\n\n tracklets = {}\n\n # Find all tracklets along with start frame\n for f_id, all_targets in enumerate(objects):\n for target in all_targets:\n tracklet_id = target.find('trackid').text\n if tracklet_id not in tracklets:\n tracklets[tracklet_id] = f_id\n\n for tracklet_id, tracklet_start in tracklets.items():\n tracklet_anno = []\n target_visible = []\n class_name_id = None\n\n for f_id in range(tracklet_start, len(objects)):\n found = False\n for target in objects[f_id]:\n if target.find('trackid').text == tracklet_id:\n if not class_name_id:\n class_name_id = target.find('name').text\n x1 = int(target.find('bndbox/xmin').text)\n y1 = int(target.find('bndbox/ymin').text)\n x2 = int(target.find('bndbox/xmax').text)\n y2 = int(target.find('bndbox/ymax').text)\n\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n target_visible.append(target.find('occluded').text == '0')\n\n found = True\n break\n if not found:\n break\n\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\n 'target_visible': target_visible, 'image_size': image_size}\n all_sequences.append(new_sequence)\n\n return all_sequences" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. 
Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n use_lmdb - whether the dataset is stored in lmdb format\n \"\"\"\n root = env_settings().got10k_lmdb_dir if root is None else root\n super().__init__('GOT10k_lmdb', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n def _read_meta(meta_info):\n\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n 'motion_class': meta_info[6].split(': ')[-1],\n 'major_class': meta_info[7].split(': ')[-1],\n 'root_class': 
meta_info[8].split(': ')[-1],\n 'motion_adverb': meta_info[9].split(': ')[-1]})\n\n return object_meta\n sequence_meta_info = {}\n for s in self.sequence_list:\n try:\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n except:\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return sequence_meta_info\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n dir_str = decode_str(self.root, 'train/list.txt')\n dir_list = dir_str.split('\\n')\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in got10k is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # full occlusion and out_of_view files\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n # Read these files\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\n occlusion = torch.ByteTensor(occ_list)\n cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\n cover = torch.ByteTensor(cover_list)\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(\"train\", self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\n\n def __init__(self, 
root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_lmdb_dir if root is None else root\n super().__init__('LaSOT_lmdb', root, image_loader)\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n self.class_list = []\n for ele in class_list:\n if ele not in self.class_list:\n self.class_list.append(ele)\n # Keep a list of all classes\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n occlusion = torch.ByteTensor(occ_list)\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n out_of_view = torch.ByteTensor(out_view_list)\n\n 
target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n sequence_list_dict = decode_json(root, \"cache.json\")\n self.sequence_list = sequence_list_dict\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid_lmdb'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return decode_img(self.root, frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO_lmdb', root, image_loader)\n self.root = root\n self.img_pth = 'images/{}{}/'.format(split, version)\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n # Load the COCO set.\n print('loading annotations into memory...')\n tic = time.time()\n coco_json = decode_json(root, self.anno_path)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n self.coco_set = COCO(coco_json)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n # img = self.image_loader(os.path.join(self.img_pth, path))\n img = decode_img(self.root, os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_lmdb_dir if root is None else root\n super().__init__('TrackingNet_lmdb', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n train_cls=False, pos_prob=0.5):\n def __len__(self):\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n allow_invisible=False, force_invisible=False):\n def __getitem__(self, index):\n def getitem(self):\n def getitem_cls(self):\n def get_center_box(self, H, W, 
ratio=1/8):\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\n def get_one_search(self):\n def get_frame_ids_trident(self, visible):\n def get_frame_ids_stark(self, visible, valid):\nclass TrackingSampler(torch.utils.data.Dataset):\n H, W, _ = template_frames[0].shape\n H, W, _ = template_frames[0].shape\n H, W, _ = search_frames[0].shape" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n def __call__(self, data: TensorDict):\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n mode='pair', settings=None, *args, **kwargs):\n def _get_jittered_box(self, box, mode):\n def __call__(self, data: TensorDict):\nclass BaseProcessing:\nclass STARKProcessing(BaseProcessing):" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n select along which dimension the data should be stacked to form a batch.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraries\n may be duplicated upon initializing workers (w.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) 
You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n __initialized = False\n\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n if collate_fn is None:\n if stack_dim == 0:\n collate_fn = ltr_collate\n elif stack_dim == 1:\n collate_fn = ltr_collate_stack1\n else:\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\n\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last,\n timeout, worker_init_fn)\n\n self.name = name\n self.training = training\n self.epoch_interval = epoch_interval\n self.stack_dim = stack_dim" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n try:\n im = cv.imread(path, cv.IMREAD_COLOR)\n\n # convert to rgb and return\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n except Exception as e:\n print('ERROR: Could not read image \"{}\"'.format(path))\n print(e)\n return None" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\n return get_rank() == 0" } ]
import os import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
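opencv_loader, imported here and shown in full in the context above, reads with cv.imread (which yields BGR order, or None on failure) and converts to RGB. A standalone equivalent that uses an explicit None check in place of the try/except (the file path is a placeholder):

```python
import cv2 as cv

def load_rgb(path):
    """Read an image from disk and return it as an RGB array, or None on failure."""
    im = cv.imread(path, cv.IMREAD_COLOR)  # BGR order; None if the file is unreadable
    if im is None:
        print('ERROR: Could not read image "{}"'.format(path))
        return None
    return cv.cvtColor(im, cv.COLOR_BGR2RGB)

img = load_rgb("example.jpg")  # placeholder path
print(None if img is None else img.shape)
```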
18241
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
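names2datasets above is a chain of name checks, each picking a raw-file or LMDB-backed constructor. The same dispatch can be expressed as a name-to-builder table, which keeps each name handled exactly once and makes the supported set self-documenting; a minimal sketch with stand-in dataset classes (ToyLasot and ToyGot10k are placeholders, not the repo's classes):

```python
class ToyLasot:
    def __init__(self, root, split):
        self.root, self.split = root, split

class ToyGot10k:
    def __init__(self, root, split):
        self.root, self.split = root, split

def names2datasets_toy(name_list, roots):
    builders = {
        "LASOT": lambda: ToyLasot(roots["lasot"], split="train"),
        "GOT10K_vottrain": lambda: ToyGot10k(roots["got10k"], split="vottrain"),
        "GOT10K_votval": lambda: ToyGot10k(roots["got10k"], split="votval"),
    }
    datasets = []
    for name in name_list:
        assert name in builders, "unsupported dataset name: %s" % name
        datasets.append(builders[name]())
    return datasets

train_sets = names2datasets_toy(["LASOT", "GOT10K_vottrain"],
                                {"lasot": "/data/lasot", "got10k": "/data/got10k"})
print([type(d).__name__ for d in train_sets])
```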
datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
2
2023-12-10 03:57:19+00:00
24k
lumina-test/lumina
lumina/e2e_test/test_gbn.py
[ { "identifier": "get_qp_info_list", "path": "lumina/analyzer/main.py", "snippet": "def get_qp_info_list(switch_msg_snapshot):\n \"\"\" Get the list of QP info from the switch message snapshot\n\n Args:\n switch_msg_snapshot (str): The path to the switch message snapshot\n\n Returns:\n list of dict: The list of queue pair (QP) information if successful or None otherwise.\n The list of QP information is in the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n \"\"\"\n try:\n with open(switch_msg_snapshot, 'r') as stream:\n qp_info_list = yaml.safe_load(stream)\n except:\n logging.error(\"Read switch message snapshot %s error.\" % switch_msg_snapshot)\n return None\n\n logging.info(\"Read switch message snapshot %s.\" % switch_msg_snapshot)\n return qp_info_list" }, { "identifier": "Orchestrator", "path": "lumina/orchestrator/main.py", "snippet": "class Orchestrator:\n \"\"\" Class to manage the experiment \"\"\"\n def __init__(self, config_file):\n \"\"\" Constructor for Orchestrator class\n\n Args:\n config_file (str): path to the yaml (config) file.\n The file contains configs for switch, requester, responder, traffic, etc.\n\n Returns:\n N/A\n \"\"\"\n with open(config_file, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n local_workspace = conf['local-workspace']\n result_path = conf['result-path']\n switch_conf = conf['switch']\n requester_conf = conf['requester']\n responder_conf = conf['responder']\n requester_mirror_conf = conf['requester-mirror']\n responder_mirror_conf = conf['responder-mirror']\n traffic_conf = conf['traffic']\n rewrite_udp_dst_port = conf['rewrite-udp-dst-port']\n num_repeats = conf['num-repeats']\n agg_pcap_filename = conf['aggregate-pcap-filename']\n except KeyError as e:\n print(\"Config file %s has a bad yaml format (key error: %s)\" % (config_file, e))\n sys.exit(-1)\n\n switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port\n\n self.local_workspace = local_workspace\n self.result_path = result_path\n self.traffic_conf = traffic_conf\n self.num_repeats = num_repeats\n self.switch = switch.Switch(switch_conf)\n self.requester = host.RDMAHost(requester_conf)\n self.responder = host.RDMAHost(responder_conf)\n self.requester_mirror = host.MirrorHost(requester_mirror_conf)\n self.responder_mirror = host.MirrorHost(responder_mirror_conf)\n self.aggregate_pcap_filename = agg_pcap_filename\n\n cmd = \"mkdir -p %s\" % self.result_path\n subprocess.call(cmd, shell = True)\n\n def rm_old_files(self):\n \"\"\" Remove result files left by previous experiments \"\"\"\n old_iter_id = 0\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path):\n cmd = \"rm -rf %s\" % (old_iter_result_path)\n subprocess.call(cmd, shell=True)\n\n old_iter_id += 1\n old_iter_result_path = os.path.join(self.result_path, str(old_iter_id))\n\n def get_requester_ip_list(self):\n \"\"\" Return the list of requester IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']]\n\n def get_responder_ip_list(self):\n \"\"\" Return the 
list of responder IP addresses (without prefix length info) \"\"\"\n return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']]\n\n def get_num_repeats(self):\n \"\"\" Return the number of experiment repeats \"\"\"\n return self.num_repeats\n\n def sync_and_compile(self):\n \"\"\" Syncronize and compile the code on all the hosts\n\n Returns:\n bool: True if the code is synced and compiled successfully, False otherwise\n \"\"\"\n logging.info(\"Sync and compile the code\")\n\n ## Sync and compile the switch code\n ret = self.switch.sync_and_compile(self.local_workspace,\n switch.SWITCH_PROG_DIR_NAME,\n switch.SWITCH_PROG_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the switch code\")\n return False\n\n ## Sync and compile the traffic generator code\n rdma_verb = self.traffic_conf['rdma-verb'].strip().lower()\n if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER:\n logging.error(\"Invalid RDMA verb: %s\" % rdma_verb)\n return False\n\n ret = self.requester.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_client_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on requester\")\n return False\n\n ret = self.responder.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=self.requester.traffic_gen_dir_name(),\n prog_file_name=self.requester.traffic_gen_server_name(rdma_verb))\n if ret == False:\n logging.error(\"Failed to sync and compile the traffic generator code on responder\")\n return False\n\n ret = self.requester.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on requester\")\n return False\n\n ret = self.responder.sync(local_workspace=self.local_workspace,\n prog_dir_name=host.DUMP_COUNTER_DIR_NAME)\n if ret == False:\n logging.error(\"Failed to sync the dump counter code on responder\")\n return False\n\n ## Sync and compile the packet capture code\n ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on requester_mirror\")\n return False\n\n ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace,\n prog_dir_name=host.PKT_CAPTURE_DIR_NAME,\n prog_file_name=host.PKT_CAPTURE_FILE_NAME)\n if ret == False:\n logging.error(\"Failed to sync and compile the packet capture code on responder_mirror\")\n return False\n\n return True\n\n def generate_switch_table_config(self):\n \"\"\" Generate the switch configuration, including:\n 1. Forward table\n 2. Mirror table\n 3. ARP table\n 4. 
Traffic table, including the events to inject\n\n Returns:\n bool: True if the switch configuration is generated successfully, False otherwise\n \"\"\"\n requester_nic_conf = self.requester.conf['nic']\n responder_nic_conf = self.responder.conf['nic']\n requester_mirror_nic_conf = self.requester_mirror.conf['nic']\n responder_mirror_nic_conf = self.responder_mirror.conf['nic']\n\n ## Set up forward table entries\n self.switch.conf['forward-table'] = []\n try:\n for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \\\n requester_mirror_nic_conf, responder_mirror_nic_conf],\n ['requester', 'responder', 'requester_mirror', 'responder_mirror']):\n forward_table_entry = {'dst-mac': nic_conf['mac'],\n 'eg-port': nic_conf['switch-port'],\n 'host': host_type}\n self.switch.conf['forward-table'].append(forward_table_entry)\n except:\n logging.error(\"Failed to set forward table\")\n return False\n\n ## Set up mirror table entries, use ingress_to_egress\n try:\n requester_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': requester_nic_conf['switch-port'],\n 'dst-port': requester_mirror_nic_conf['switch-port']}\n\n responder_mirror_entry = {'direction': 'ingress_to_egress',\n 'src-port': responder_nic_conf['switch-port'],\n 'dst-port': responder_mirror_nic_conf['switch-port']}\n self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry]\n except:\n logging.error(\"Failed to set mirror table\")\n return False\n\n requester_mac = requester_nic_conf['mac']\n responder_mac = responder_nic_conf['mac']\n requester_ip_list = requester_nic_conf['ip-list']\n responder_ip_list = responder_nic_conf['ip-list']\n ## Set up arp table entries\n arp_entries = []\n try:\n for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list],\n [requester_mac, responder_mac]):\n for dst_ip_subnet in dst_ip_list:\n dst_ip = dst_ip_subnet.split('/')[0]\n arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac})\n self.switch.conf['arp-table'] = arp_entries\n except:\n logging.error(\"Failed to set ARP table\")\n return False\n\n ## Generate the events of each iteration for switch config\n per_iter_event_list = self.traffic_conf['data-pkt-events']\n msg_size = self.traffic_conf['message-size']\n mtu = self.traffic_conf['mtu']\n num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp']\n num_pkts_per_msg = int(math.ceil(msg_size / mtu))\n self.switch.conf['traffic'] = {}\n self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp\n self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'] = []\n\n if per_iter_event_list is None or len(per_iter_event_list) == 0:\n ## No events at all\n return True\n\n for i in range(num_msgs_per_qp):\n for per_iter_event in per_iter_event_list:\n global_event = copy.deepcopy(per_iter_event)\n\n ## This event is applied to all the packets of the message. 
We need to expand it!\n if str(global_event['psn']).lower() == 'all':\n for psn in range(num_pkts_per_msg):\n global_event['psn'] = psn + i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n else:\n global_event['psn'] += i * num_pkts_per_msg\n self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event))\n\n return True\n\n def ping_mesh(self):\n \"\"\" Ping all the IP addresses between requester and responder to check the connectivity\n\n Returns:\n bool: True if all the IP addresses can be pinged successfully, False otherwise\n \"\"\"\n for requester_ip_subnet in self.requester.conf['nic']['ip-list']:\n requester_ip = requester_ip_subnet.split('/')[0]\n command = \"ping \" + requester_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.responder.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + requester_ip)\n logging.error(\"[Command return info]: %s %s\" % (', '.join(ret_val), ', '.join(err_info)))\n return False\n\n for responder_ip_subnet in self.responder.conf['nic']['ip-list']:\n responder_ip = responder_ip_subnet.split('/')[0]\n command = \"ping \" + responder_ip + \" -c 5 -i 0.2\"\n ret_val, err_info, exit_status = self.requester.execute_command(command)\n if exit_status != 0:\n logging.error(\"Failed to ping ip \" + responder_ip)\n logging.error(\"[Command return info]: %s %s\" % (ret_val, err_info))\n return False\n\n logging.info(\"Successfully pinged all the IP addresses between requester and responder\")\n return True\n\n def generate_switch_config_file(self):\n \"\"\" Generate the switch configuration file and copy it to the switch\n\n Returns:\n bool: True if the switch configuration file is generated and copied successfully, False otherwise\n \"\"\"\n ## Get the mac address for all the hosts\n self.requester.get_mac_address()\n self.responder.get_mac_address()\n self.requester_mirror.get_mac_address()\n self.responder_mirror.get_mac_address()\n\n ## Generate config for Match-Action table in switch\n if self.generate_switch_table_config() == False:\n logging.error(\"Failed to generate switch table configuration\")\n return False\n\n ## Dump the switch configuration into a file, and copy it to the switch\n if self.switch.dump_controller_config(self.local_workspace) == False:\n logging.error(\"Failed to dump switch config\")\n return False\n\n return True\n\n def __is_valid_traffc(self):\n \"\"\" Check if the traffic configuration is valid, including:\n 1. The tx-depth should be 1 or > 1\n 2. 
If tx-depth > 1, then we can only inject ECN marking events\n\n Returns:\n bool: True if the traffic configuration is valid, False otherwise\n \"\"\"\n try:\n data_pkt_events = self.traffic_conf['data-pkt-events']\n tx_depth = self.traffic_conf['tx-depth']\n\n if tx_depth == 1:\n return True\n elif tx_depth <= 0:\n return False\n\n for event in data_pkt_events:\n if event['type'] != 'ecn':\n logging.error(\"Cannot inject %s event when tx depth = %d\" % (event['type'], tx_depth))\n return False\n except:\n logging.error(\"Failed to parse traffic configuration\")\n return False\n\n return True\n\n def run_experiment(self):\n \"\"\" Run the experiment\n\n Returns:\n bool: True if the experiment is completed successfully, False otherwise\n \"\"\"\n\n ## Check if traffic configuration is valid\n if self.__is_valid_traffc() == False:\n logging.error(\"Invalid traffic configuration\")\n return False\n\n ## Run switch program\n if self.switch.run_switch() == False:\n logging.error(\"Failed to run switch\")\n return False\n\n ## Sleep for 1 second to make sure control plane is listenning (for client message)\n time.sleep(1)\n\n ## Configure the servers\n if self.requester.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA requester\")\n return False\n\n if self.responder.config_traffic_gen() == False:\n logging.error(\"Failed to config RDMA responder\")\n return False\n\n if self.requester_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.config_packet_capture() == False:\n logging.error(\"Failed to config packet capture on responder mirror\")\n return False\n\n ## Check the connectivity through pingmesh (try 5 rounds)\n num_tries = 0\n pingmesh_ret = False\n\n while num_tries < 5:\n pingmesh_ret = self.ping_mesh()\n if pingmesh_ret == True:\n break\n num_tries += 1\n time.sleep(1)\n\n if pingmesh_ret == False:\n logging.error(\"Failed to ping all the IP addresses between requester and responder\")\n return False\n\n ## Launch packet capture for both side\n ## Prerequisite: config hugepage and igb_uio if needed\n if self.requester_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on requester mirror\")\n return False\n\n if self.responder_mirror.run_packet_capture() == False:\n logging.error(\"Failed to run packet capture on responder mirror\")\n return False\n\n time.sleep(3)\n\n ## Dump the counters before running\n if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester before running\")\n return False\n\n if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder before running\")\n return False\n\n ## Launch RDMA server first\n run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf)\n if run_server_ret == False:\n logging.error(\"Failed to run RDMA server\")\n return False\n\n time.sleep(2)\n\n ## Launch RDMA client\n try:\n destination_ip_subnet = self.responder.conf['nic']['ip-list'][0]\n destination_ip = destination_ip_subnet.split('/')[0]\n except:\n logging.error(\"Failed to get destination IP\")\n return False\n\n run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf,\n destination_ip=destination_ip,\n controller_ip=self.switch.conf['control-ip'],\n controller_listen_port=self.switch.conf['listen-port'])\n if run_client_ret == 
False:\n logging.error(\"Failed to run RDMA client\")\n return False\n\n if self.switch.dump_results() == False:\n logging.error(\"Failed to dump results from switch\")\n return False\n\n if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on requester after running\")\n return False\n\n if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False:\n logging.error(\"Failed to dump counters on responder after running\")\n return False\n\n logging.info(\"Experiment completed successfully\")\n return True\n\n def clean_up(self):\n \"\"\" Clean up the environment after the experiment\n\n Returns:\n bool: True if the clean up is completed successfully, False otherwise\n \"\"\"\n logging.info(\"Start cleaning up the environment\")\n\n if self.switch.clean_up() == False:\n logging.error(\"Failed to clean up switch\")\n return False\n\n if self.requester.clean_up() == False:\n logging.error(\"Failed to clean up requester\")\n return False\n\n if self.responder.clean_up() == False:\n logging.error(\"Failed to clean up responder\")\n return False\n\n if self.requester_mirror.clean_up() == False:\n logging.error(\"Failed to clean up requester mirror\")\n return False\n\n if self.responder_mirror.clean_up() == False:\n logging.error(\"Failed to clean up responder mirror\")\n return False\n\n return True\n\n def fetch_results(self, iter_id=0):\n \"\"\" Fetch the results of iteration 'iter_id', including:\n 1. Switch table entries and counters\n 2. Packet trace (pcap file)\n 3. Configs and end-to-end results from RDMA hosts\n\n Args:\n iter_id (int, optional): iteration ID, defaults to 0\n\n Returns:\n bool: True if the result collection is completed successfully, False otherwise\n \"\"\"\n ## Make the results dir if it does not exist\n iter_result_path = os.path.join(self.result_path, str(iter_id))\n cmd = \"mkdir -p %s\" % iter_result_path\n try:\n subprocess.call(cmd, shell=True)\n except:\n logging.error(\"Failed to create result directory %s\" % iter_result_path)\n return False\n\n if self.switch.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from switch\")\n return False\n\n if self.requester_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester mirror\")\n return False\n\n if self.responder_mirror.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder mirror\")\n return False\n\n if self.requester.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from requester\")\n return False\n\n if self.responder.fetch_results(iter_result_path) == False:\n logging.error(\"Failed to fetch results from responder\")\n return False\n\n logging.info(\"Finished fetching results for iteration %d\" % iter_id)\n return True\n\n def merge_traces(self, iter_id=0):\n iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR)\n src_pcap_file_list = [os.path.join(iter_pcap_dir_path,\n self.requester_mirror.conf['pkt-dump-conf']['dump-filename']),\n os.path.join(iter_pcap_dir_path,\n self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])]\n target_pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = pcap_process.merge_pcaps(src_pcap_file_list)\n if packet_list is None:\n logging.error(\"Failed to merge pcap files for iteration %d\" % iter_id)\n return False\n\n if 
pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False:\n logging.error(\"Failed to dump packets to pcap file %s\" % target_pcap_path)\n return False\n\n logging.info(\"Successfully merged pcap files for iteration %d\" % iter_id)\n\n def check_integrity(self, iter_id=0):\n ## Check if the collected packet trace passes integrity check\n pcap_path = os.path.join(self.result_path,\n str(iter_id),\n host.PCAP_RESULT_DIR,\n self.aggregate_pcap_filename)\n packet_list = get_packet_list(pcap_path)\n packet_list.sort(key=lambda x:x.get_switch_seqnum())\n logging.info(\"Packet trace sorted by switch sequence number.\")\n\n switch_state_snapshot = os.path.join(self.result_path,\n str(iter_id),\n switch.SWITCH_RESULT_DIR,\n switch.SWITCH_STATE_SNAPSHOT)\n port_map = {'requester': self.requester.conf['nic']['switch-port'],\n 'responder': self.responder.conf['nic']['switch-port'],\n 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'],\n 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']}\n switch_counter = SwitchCounter(switch_state_snapshot, port_map)\n\n integrity_checker = IntegrityCheck(packet_list=packet_list,\n switch_counter=switch_counter,\n requester_ip_list=self.get_requester_ip_list(),\n responder_ip_list=self.get_responder_ip_list())\n\n if integrity_checker.check() == True:\n logging.info(\"Integrity check passed\")\n return True\n else:\n logging.info(\"Integrity check failed\")\n return False" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = 
egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "MLNXHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class MLNXHostCounter(HostCounter):\n \"\"\" Class to parse MLNX host counter files \"\"\"\n def __init__(self, counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_port_rcv_packets(self):\n \"\"\" Return the number of received packets \"\"\"\n return self._counter['port-counters']['port_rcv_packets']\n\n def get_port_xmit_packets(self):\n \"\"\" Return the number of transmitted packets \"\"\"\n return self._counter['port-counters']['port_xmit_packets']\n\n def get_num_packet_seq_err(self):\n \"\"\" Return the number of received NAK sequence error packets \"\"\"\n return self._counter['hw-counters']['packet_seq_err']\n\n def get_num_out_of_sequence(self):\n \"\"\" Return the number of out-of-sequence packets received \"\"\"\n return self._counter['hw-counters']['out_of_sequence']\n\n def get_num_dup_requests(self):\n \"\"\" Return the number of duplicate requests \"\"\"\n return self._counter['hw-counters']['duplicate_request']\n\n def implied_nak_seq_err(self):\n \"\"\" Return the number of READ requests implying sequence errors \"\"\"\n return self._counter['hw-counters']['implied_nak_seq_err']\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['np_cnp_sent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['np_ecn_marked_roce_packets']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['rp_cnp_handled']\n\n def get_num_icrc_errors(self):\n \"\"\" Return the number of RoCE packets with ICRC errors received \"\"\"\n return self._counter['hw-counters']['rx_icrc_encapsulated']\n\n def get_num_timeout_err(self):\n \"\"\" Return the number of times QP's ack timer expired for RC, XRC, DCT QPs at the sender side \"\"\"\n return self._counter['hw-counters']['local_ack_timeout_err']\n\n def get_num_discards_dict_tx(self):\n \"\"\" Return the number of TX discarded packets (dict)\"\"\"\n discards_dict_tx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'tx' in x:\n discards_dict_tx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_tx\n\n def get_num_discards_dict_rx(self):\n \"\"\" Return the number of RX discarded packets (dict) \"\"\"\n discards_dict_rx = {}\n for x in self._counter['ethtool-counters'].keys():\n if 'discard' in x and 'rx' in x:\n discards_dict_rx[x] = self._counter['ethtool-counters'][x]\n return discards_dict_rx" }, { "identifier": "IntelHostCounter", "path": "lumina/analyzer/counter/host_counter.py", "snippet": "class IntelHostCounter(HostCounter):\n \"\"\" Class to parse Intel host counter files \"\"\"\n def __init__(self, 
counter_start_filename, counter_finish_filename):\n \"\"\" Constructor\n\n Args:\n counter_start_filename (str): the file where host dumps its counters at the start phase\n counter_finish_filename (str): the file where host dumps its counters at the finish phase\n\n Returns:\n N/A\n \"\"\"\n super().__init__(counter_start_filename, counter_finish_filename)\n\n def get_num_cnp_sent(self):\n \"\"\" Return the number of congestion notification packets sent by notification point \"\"\"\n return self._counter['hw-counters']['cnpSent']\n\n def get_num_ecn_marked_packets(self):\n \"\"\" Return the number of ECN marked RoCEv2 packets received by notification point \"\"\"\n return self._counter['hw-counters']['RxECNMrkd']\n\n def get_num_cnp_handled(self):\n \"\"\" Return the number of congestion notification packets handled by reaction point \"\"\"\n return self._counter['hw-counters']['cnpHandled']\n\n def get_num_discards_dict(self):\n \"\"\" Return the number of discarded packets (dict) \"\"\"\n discards_dict= {}\n for x in self._counter['hw-counters'].keys():\n if 'discard' in x:\n discards_dict[x] = self._counter['hw-counters'][x]\n return discards_dict" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "LatencyMeasure", "path": "lumina/analyzer/measurer/latency_measure.py", "snippet": "class LatencyMeasure:\n \"\"\" Class to measure the latency between packets for some events,\n e.g., NACK latency, Retransmission latency, CNP latency\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb\n \"\"\"\n def __init__(self, packet_list, qp_info_list, is_read=False):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n qp_info_list (list of dict): list of QP info with the following format:\n [{'psn_rcv': initial packet sequence number from the receiver qp,\n 'psn_snd': initial packet sequence number from the sender qp,\n 'qpn_rcv': receiver qp number,\n 'qpn_snd': sender qp number,\n 'ip_rcv' : receiver IP\n 'ip_snd' : sender IP}]\n is_read (bool): if the QPs use RDMA read verb (default: False)\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.qp_info_list = qp_info_list\n self.is_read = is_read\n\n def get_peer_qp_info(self, dest_qpn, dest_ip):\n \"\"\" Get the info of the peer QP (qpn, ip) of a given qp (qpn, ip)\n\n Args:\n dest_qpn (int): destination QP number\n dest_ip (str): destination IP\n\n Returns:\n int: peer QP number (None if not found)\n str: peer IP (None if not found)\n \"\"\"\n for qp_info in self.qp_info_list:\n if qp_info['qpn_snd'] == dest_qpn and qp_info['ip_snd'] == dest_ip:\n return qp_info['qpn_rcv'], qp_info['ip_rcv']\n elif qp_info['qpn_rcv'] == dest_qpn and qp_info['ip_rcv'] == dest_ip:\n return qp_info['qpn_snd'], qp_info['ip_snd']\n\n return None, None\n\n def get_bit_error_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with bit error flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with bit error flag\n \"\"\"\n error_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_bit_error() == False:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n error_pkt_list.append(packet)\n\n return error_pkt_list\n\n def get_dropped_pkts(self, relative_dest_qpn=None):\n \"\"\" Get the packets marked with drop flag\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with drop flag\n \"\"\"\n dropped_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_dropped() == False:\n continue\n\n if relative_dest_qpn 
== None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n dropped_pkt_list.append(packet)\n\n return dropped_pkt_list\n\n def get_ecn_pkts(self):\n \"\"\" Get the packets marked with ECN\n\n Returns:\n list of RRoCEPacket objects: the list of packets marked with ECN\n \"\"\"\n ecn_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_ecn():\n ecn_pkt_list.append(packet)\n\n return ecn_pkt_list\n\n def get_cnp_pkts(self):\n \"\"\" Get the congestion notification packets\n\n Returns:\n list of RRoCEPacket objects: the list of congestion notification packets\n \"\"\"\n cnp_pkt_list = []\n\n for packet in self.packet_list:\n if packet.is_cnp():\n cnp_pkt_list.append(packet)\n\n return cnp_pkt_list\n\n def get_undelivered_pkts(self, relative_dest_qpn = None):\n \"\"\" Get the undelivered packets (dropped or marked with bit error)\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of RRoCEPacket objects: the list of undelivered packets\n \"\"\"\n undelivered_pkt_list = []\n\n if relative_dest_qpn != None:\n dest_qpn = self.qp_info_list[relative_dest_qpn]['qpn_rcv']\n dest_ip = self.qp_info_list[relative_dest_qpn]['ip_rcv']\n\n for packet in self.packet_list:\n if packet.is_delivered() == True:\n continue\n\n if relative_dest_qpn == None or \\\n (packet.get_roce_dest_qp() == dest_qpn and packet.get_dst_ip() == dest_ip):\n undelivered_pkt_list.append(packet)\n\n return undelivered_pkt_list\n\n def get_nack(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return the NACK packet that triggers its retransmission.\n If there's no NACK packet found for the undelivered packet, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet that triggers retransmission\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the NACK packet that triggers the retransmission of the undelivered packet\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() == undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_qp_first_nack_before_retrans(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the first NACK packet on its QP between it and its retransmission.\n If there's no NACK packet found before the retransmission, return None.\n Note that for RDMA READ, NACK is essentially a READ request packet\n\n Args:\n undelivered_pkt (RRoCEPacket 
object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the first NACK packet on the QP between the undelivered packet and its retransmission\n (None if not found)\n \"\"\"\n undelivered_pkt_dest_qpn = undelivered_pkt.get_roce_dest_qp()\n undelivered_pkt_dst_ip = undelivered_pkt.get_dst_ip()\n undelivered_pkt_psn = undelivered_pkt.get_roce_pkt_seq()\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n matched_dest_qpn, matched_dst_ip = self.get_peer_qp_info(undelivered_pkt_dest_qpn, undelivered_pkt_dst_ip)\n\n if matched_dest_qpn == None or matched_dst_ip == None:\n logging.error(\"QP info of the undelivered packet not found in qp_info_list dumped by switch\")\n return None\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return None\n\n if ((self.is_read and packet.is_roce_read_req()) or packet.is_roce_nack()) and \\\n packet.get_dst_ip() == matched_dst_ip and \\\n packet.get_roce_dest_qp() == matched_dest_qpn and \\\n packet.get_roce_pkt_seq() <= undelivered_pkt_psn and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n return packet\n\n return None\n\n def get_qp_next_delivered_pkt(self, current_pkt):\n \"\"\" For a packet, return the next delivered packet on the same QP.\n\n Args:\n current_pkt (RRoCEPacket object): the current packet\n\n Returns:\n RRoCEPacket object: the next delivered packet on the same QP (None if not found)\n \"\"\"\n switch_seqnum = current_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_qp_roce_data_pkt(packet, current_pkt) and \\\n packet.get_switch_seqnum() > switch_seqnum and \\\n packet.is_delivered():\n return packet\n\n return None\n\n def get_retransmit_pkt(self, undelivered_pkt):\n \"\"\" Given an undelivered packet, return its retransmission packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n RRoCEPacket object: the retransmission packet of the undelivered packet (None if not found)\n \"\"\"\n undelivered_pkt_switch_seqnum = undelivered_pkt.get_switch_seqnum()\n\n for packet in self.packet_list:\n if self.is_same_roce_data_pkt(packet, undelivered_pkt) and \\\n packet.get_switch_seqnum() > undelivered_pkt_switch_seqnum:\n ## We return the first packet appears after the undelivered packet and matches the undelivered packet\n return packet\n\n return None\n\n def get_latency_between_pkts(self, packet_alpha, packet_beta):\n \"\"\" Return the time of packet_beta - time of packet_alpha in seconds\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n float: the time difference between two packets in seconds\n \"\"\"\n return packet_beta.get_switch_timestamp() - packet_alpha.get_switch_timestamp()\n\n def is_same_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are the same RoCE data packet (same src ip, dst ip, dest qp, and psn)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are the same RoCE data packet, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp() and \\\n packet_alpha.get_roce_pkt_seq() == packet_beta.get_roce_pkt_seq()\n\n def 
is_same_qp_roce_data_pkt(self, packet_alpha, packet_beta):\n \"\"\" Return if two packets are RoCE data packets on the same QP (same src ip, dst ip, and dest qp)\n\n Args:\n packet_alpha (RRoCEPacket object): the first packet\n packet_beta (RRoCEPacket object): the second packet\n\n Returns:\n bool: True if two packets are RoCE data packets on the same QP, False otherwise\n \"\"\"\n return packet_alpha.get_src_ip() == packet_beta.get_src_ip() and \\\n packet_alpha.get_dst_ip() == packet_beta.get_dst_ip() and \\\n packet_alpha.get_roce_dest_qp() == packet_beta.get_roce_dest_qp()\n\n def get_qp_next_delivered_pkt_latency(self, pkt):\n \"\"\" Get the latency between 'pkt' and next 'delivered' packet on the same QP\n\n Args:\n pkt (RRoCEPacket object): the packet\n\n Returns:\n float: the latency between 'pkt' and next 'delivered' packet on the same QP\n (None if not found)\n \"\"\"\n\n next_pkt = self.get_qp_next_delivered_pkt(pkt)\n if next_pkt is None:\n return None\n\n return self.get_latency_between_pkts(pkt, next_pkt)\n\n def get_nack_gen_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK generation latency, i.e., the duration from the detection of\n the undelivered packet to the generation of the NACK packet that triggers its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK generation latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n # NACK should be triggered by the next delivered packet on the same QP\n next_delivered_pkt = self.get_qp_next_delivered_pkt(undelivered_pkt)\n if self.is_same_roce_data_pkt(next_delivered_pkt, undelivered_pkt):\n # We should never reach here\n return None\n\n nack_gen_latency = self.get_latency_between_pkts(next_delivered_pkt, nack_pkt)\n return nack_gen_latency\n\n def get_nack_resp_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the NACK response latency, i.e., the duration from the generation of\n the NACK packet to the retransmission of this undelivered packet.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the NACK response latency for the undelivered packet (None if not found)\n \"\"\"\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n return None\n\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n nack_resp_latency = self.get_latency_between_pkts(nack_pkt, retransmit_pkt)\n return nack_resp_latency\n\n def get_retransmit_latency(self, undelivered_pkt):\n \"\"\" For an undelivered packet, return the retransmission latency, i.e., the duration from the packet\n to its retransmission.\n\n Args:\n undelivered_pkt (RRoCEPacket object): the undelivered packet\n\n Returns:\n float: the retransmission latency for the undelivered packet (None if not found)\n \"\"\"\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n return None\n\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n return retransmit_latency\n\n def get_nack_gen_latency_list(self, relative_dest_qpn=None):\n \"\"\" Return a list of NACK generation latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of NACK generation latency for all 
undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n nack_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n nack_pkt = self.get_nack(undelivered_pkt)\n if nack_pkt == None:\n nack_latency_list.append(None)\n else:\n nack_latency = self.get_latency_between_pkts(undelivered_pkt, nack_pkt)\n nack_latency_list.append(nack_latency)\n\n return nack_latency_list\n\n def get_retransmit_latency_list(self, relative_dest_qpn):\n \"\"\" Return a list of retransmission latency for all undelivered packets with relative_dest_qpn\n\n Args:\n relative_dest_qpn (int): the relative destination QP number (None if not specified)\n\n Returns:\n list of float: a list of retransmission latency for all undelivered packets with relative_dest_qpn\n \"\"\"\n undelivered_pkts = self.get_undelivered_pkts(relative_dest_qpn)\n retransmit_latency_list = []\n\n for undelivered_pkt in undelivered_pkts:\n retransmit_pkt = self.get_retransmit_pkt(undelivered_pkt)\n if retransmit_pkt == None:\n retransmit_latency_list.append(None)\n else:\n retransmit_latency = self.get_latency_between_pkts(undelivered_pkt, retransmit_pkt)\n retransmit_latency_list.append(retransmit_latency)\n\n return retransmit_latency_list" }, { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "TRIGGER_OOS", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_OOS = 1" }, { "identifier": "TRIGGER_TIMEOUT", "path": "lumina/analyzer/packet_parser/roce_packet.py", "snippet": "TRIGGER_TIMEOUT = 2" } ]
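The LatencyMeasure entry above derives per-event latencies purely from switch sequence numbers and switch timestamps. A usage sketch stitching together the helpers from this context, assuming the lumina package is importable (both input paths are placeholders):

```python
from lumina.analyzer.main import get_qp_info_list
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure

packet_list = get_packet_list("results/0/pcap/aggregate.pcap")    # placeholder path
packet_list.sort(key=lambda pkt: pkt.get_switch_seqnum())         # analysis assumes switch order
qp_info_list = get_qp_info_list("results/0/switch/qp_info.yaml")  # placeholder path

measure = LatencyMeasure(packet_list, qp_info_list, is_read=False)
for pkt in measure.get_undelivered_pkts(relative_dest_qpn=0):
    latency = measure.get_retransmit_latency(pkt)
    if latency is not None:
        print("PSN %d retransmitted after %.1f us" % (pkt.get_roce_pkt_seq(), latency * 1e6))
```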
import argparse, os, sys, math, glob, logging, time
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.main import get_qp_info_list
from lumina.orchestrator.main import Orchestrator
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.analyzer.measurer.latency_measure import LatencyMeasure
from lumina.utils.config_loggers import config_stream_handler, config_file_handler
from lumina.analyzer.packet_parser.roce_packet import TRIGGER_OOS, TRIGGER_TIMEOUT
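config_stream_handler and config_file_handler, imported above, each attach one handler to a logger; the test keeps a formatted run log separate from a terse result log. A minimal sketch of that split, assuming the lumina package is importable (logger name and file paths are illustrative):

```python
import logging
from lumina.utils.config_loggers import config_stream_handler, config_file_handler

run_logger = logging.getLogger()             # root logger: formatted, console + file
config_stream_handler(run_logger)
config_file_handler(logger=run_logger, log_file="test_gbn.log", no_format=False)

result_logger = logging.getLogger("result")  # checker verdicts only, raw lines
config_file_handler(logger=result_logger, log_file="result.log", no_format=True)

run_logger.info("experiment started")
result_logger.info("Integrity check passed")
```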
15816
elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unkown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unkown NIC Vendor for rdma responder.") responder_counter = None qp_info_list = get_qp_info_list(switch_msg_snapshot) packet_list = get_packet_list(pcap_filename) packet_list.sort(key=lambda x:x.get_switch_seqnum()) result_logger.info("Packet trace sorted by switch sequence number.") ## Do integrity check to make sure there is nothing wrong with traces and counters integrity_checker = integrity_check.IntegrityCheck(packet_list=packet_list, switch_counter=switch_counter, requester_ip_list=requester_ip_list, responder_ip_list=responder_ip_list) if integrity_checker.check(): result_logger.info("Integrity check passed") else: result_logger.error("Integrity check failed") continue ## Check host counters host_counter_checker = host_check.HostCounterCheck() if host_counter_checker.check_no_packet_loss(requester_counter, responder_counter): result_logger.info("Host packet discard counter check passed") else: result_logger.error("Host packet discard counter check failed") continue rdma_verb = orchestrator.traffic_conf['rdma-verb'].lower().strip() if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER: logging.error("Invalid RDMA verb: %s" % rdma_verb) continue ## RDMA READ if rdma_verb == 'read': read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if read_gbn_checker.check_all_qps() == True: result_logger.info("READ Go-Back-N state machine check passed for all qps.") else: result_logger.error("READ Go-Back-N state machine check failed") continue gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if gbn_counter_check == True: result_logger.info("READ Go-Back-N counter check passed.") else: result_logger.error("READ Go-Back-N counter check failed") continue ## A mix of RDMA SEND and READ elif rdma_verb == 'send_read': num_qps_send, num_qps_read = [int(x) for x in orchestrator.traffic_conf['num-qps'].split(',')] send_qp_info_list = qp_info_list[0:num_qps_send] read_qp_info_list = qp_info_list[num_qps_send:] send_gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=send_qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=read_qp_info_list, num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if send_gbn_checker.check_all_qps() == True and read_gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue send_gbn_counter_check = send_gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) read_gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if send_gbn_counter_check == True and read_gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed for all qps.") else: 
result_logger.error("Go-Back-N counter check failed") continue else: ## Check the traces and counters against the GBN state machine gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) if gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue gbn_counter_check = gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) if gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed.") else: result_logger.error("Go-Back-N counter check failed") continue ## Output the latency for undelivered packets num_qps = len(qp_info_list) for qp_index in range(num_qps): is_read = (rdma_verb == 'read') or (rdma_verb == 'send_read' and qp_index >= num_qps_send)
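Both GBN checkers above size their expectations from the traffic config: a message of message-size bytes fragments into ceil(msg_size / mtu) data packets, and each QP sends num-msgs-per-qp messages. A quick worked check of that arithmetic (the numbers are invented):

```python
import math

msg_size = 16384        # bytes per message (invented)
mtu = 4096              # bytes per data packet (invented)
num_msgs_per_qp = 8

num_pkts_per_msg = math.ceil(msg_size / mtu)        # 4 packets per message
num_data_pkts = num_pkts_per_msg * num_msgs_per_qp  # 32 expected data packets per QP
print(num_pkts_per_msg, num_data_pkts)
```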
## All logs will be logged into file LOG_FILENAME LOG_FILENAME = "test_gbn.log" ## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME RESULT_FILENAME = "result.log" ## Max # of retries for each experiment iteration MAX_NB_EXP_RETRIES = 3 def setup_root_logger(orchestrator): """ Set up the root logger for the test Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ root_logger = logging.getLogger() root_logger.handlers.clear() config_stream_handler(root_logger) config_file_handler(logger=root_logger, log_file=os.path.join(orchestrator.result_path, LOG_FILENAME), no_format=False) def run_traffic(orchestrator): """ Run the traffic and collect the results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: bool: True if the experiment is successful, False otherwise """ orchestrator.rm_old_files() if orchestrator.sync_and_compile() == False: logging.error("Failed to sync and compile the code") sys.exit(-1) logging.info("Sync and compile completed") if orchestrator.generate_switch_config_file() == False: logging.error("Failed to generate switch configuration file") sys.exit(-1) num_repeats = orchestrator.get_num_repeats() for i in range(num_repeats): logging.info("=" * 100) nb_retry = 0 iter_result = False while nb_retry < MAX_NB_EXP_RETRIES: if orchestrator.run_experiment() == False: logging.error("Iteration %d: Failed to complete experiment" % i) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 orchestrator.clean_up() time.sleep(5) continue logging.info("Iteration %d: Completed experiment" % i) try: orchestrator.clean_up() orchestrator.fetch_results(i) logging.info("Iteration %d: Fetch experiment results" % i) orchestrator.merge_traces(i) logging.info("Iteration %d: Merge the pcap files" % i) except Exception: logging.error("Iteration %d: Result collection failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue if orchestrator.check_integrity(i) == False: logging.error("Iteration %d: Integrity check failed" % (i)) logging.error("Iteration %d: Rerun experiment (retry: %d)" % (i, nb_retry)) nb_retry += 1 time.sleep(5) continue iter_result = True break if iter_result is False: logging.error("Iteration %d: Still failed after %d retries" % (i, nb_retry)) return False return True def analyze_retrans_latency(pkt, latency_measurement, is_read, logger): """ Analyze the retransmission latency breakdown for an undelivered packet Args: pkt (Packet object): The undelivered packet latency_measurement (LatencyMeasure object): A LatencyMeasure object that can compute latency breakdown is_read (bool): If we use RDMA READ in this experiment logger (logging.Logger): A logger object Returns: N/A """ # All the undelivered packets should be retransmitted in our test cases if latency_measurement.get_retransmit_pkt(pkt) == None: logger.error("\t\t No retransmit packet found for this packet") logger.error("\t\t It is possible that this undelivered packet is a redundant transmission") return retrans_latency = latency_measurement.get_retransmit_latency(pkt) if is_read == True: # For RDMA READ, we should always find a NACK READ request that triggers retransmission nack = latency_measurement.get_nack(pkt) if nack is not None: trigger = nack.get_trigger() if trigger == TRIGGER_OOS: next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt)
nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK READ request generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t NACK READ request response latency: %fus' % (nack_resp_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) if nack is None: logger.error("\t\t Cannot find the NACK READ request to recover this lost packet") return trigger = nack.get_trigger() if trigger == TRIGGER_OOS: logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) elif trigger == TRIGGER_TIMEOUT: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t But the NACK READ request indicates a loss (%d) before this packet (%d)" %\ (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.error("\t\t NACK READ request should be triggered by either OOS or timeout") else: # For other verbs, we can only find a NACK in case of out of sequence arriving packets if latency_measurement.get_nack(pkt) != None: # Out of sequence/NACK triggered retransmission next_delivered_pkt_delay = latency_measurement.get_qp_next_delivered_pkt_latency(pkt) nack_gen_latency = latency_measurement.get_nack_gen_latency(pkt) nack_resp_latency = latency_measurement.get_nack_resp_latency(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) logger.info('\t\t Next delivered packet delay: %fus' % (next_delivered_pkt_delay * 1e6)) logger.info("\t\t NACK generation latency: %fus" % (nack_gen_latency * 1e6)) logger.info('\t\t NACK response latency: %fus' % (nack_resp_latency * 1e6)) elif latency_measurement.get_qp_first_nack_before_retrans(pkt) != None: nack = latency_measurement.get_qp_first_nack_before_retrans(pkt) logger.info("\t\t Out of sequence (OOS) triggered retransmission") logger.info("\t\t But the NACK indicates a loss (%d) before this packet (%d)" % (nack.get_roce_pkt_seq(), pkt.get_roce_pkt_seq())) logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) else: logger.info("\t\t Timeout triggered retransmission") logger.info("\t\t Retransmission latency: %fus" % (retrans_latency * 1e6)) def verify_results(orchestrator): """ Verify the experiment results Args: orchestrator (Orchestrator object): Orchestrator object that contains all the configurations Returns: N/A """ result_dir = orchestrator.result_path num_repeats = orchestrator.num_repeats mtu = orchestrator.traffic_conf['mtu'] msg_size = orchestrator.traffic_conf['message-size'] num_msgs_per_qp = orchestrator.traffic_conf['num-msgs-per-qp'] aggregate_pcap_filename =
orchestrator.aggregate_pcap_filename port_map = {'requester': orchestrator.requester.conf['nic']['switch-port'], 'responder': orchestrator.responder.conf['nic']['switch-port'], 'requester-mirror': orchestrator.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': orchestrator.responder_mirror.conf['nic']['switch-port']} requester_ip_list = orchestrator.get_requester_ip_list() responder_ip_list = orchestrator.get_responder_ip_list() for iter in range(num_repeats): iter = str(iter) result_logger = logging.getLogger('Analysis iter %s' % (iter)) result_logger.handlers.clear() config_file_handler(logger=result_logger, log_file=os.path.join(result_dir, iter, RESULT_FILENAME), no_format=True) result_logger.info("=" * 100) result_logger.info("Iteration %s" % iter) switch_msg_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_MESSAGE_SNAPSHOT) switch_state_snapshot = os.path.join(result_dir, iter, switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) pcap_filename = os.path.join(result_dir, iter, host.PCAP_RESULT_DIR, aggregate_pcap_filename) requester_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_START_COUNTER_FILE_NAME) requester_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.REQ_FINISH_COUNTER_FILE_NAME) responder_counter_start = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_START_COUNTER_FILE_NAME) responder_counter_finish = os.path.join(result_dir, iter, host.RDMA_RESULT_DIR, host.RSP_FINISH_COUNTER_FILE_NAME) switch_counter = SwitchCounter(switch_state_snapshot, port_map) if orchestrator.requester.is_mlnx_nic(): requester_counter = MLNXHostCounter(requester_counter_start, requester_counter_finish) elif orchestrator.requester.is_intel_nic(): requester_counter = IntelHostCounter(requester_counter_start, requester_counter_finish) else: logging.error("Unknown NIC Vendor for rdma requester.") requester_counter = None if orchestrator.responder.is_mlnx_nic(): responder_counter = MLNXHostCounter(responder_counter_start, responder_counter_finish) elif orchestrator.responder.is_intel_nic(): responder_counter = IntelHostCounter(responder_counter_start, responder_counter_finish) else: logging.error("Unknown NIC Vendor for rdma responder.") responder_counter = None qp_info_list = get_qp_info_list(switch_msg_snapshot) packet_list = get_packet_list(pcap_filename) packet_list.sort(key=lambda x:x.get_switch_seqnum()) result_logger.info("Packet trace sorted by switch sequence number.") ## Do integrity check to make sure there is nothing wrong with traces and counters integrity_checker = integrity_check.IntegrityCheck(packet_list=packet_list, switch_counter=switch_counter, requester_ip_list=requester_ip_list, responder_ip_list=responder_ip_list) if integrity_checker.check(): result_logger.info("Integrity check passed") else: result_logger.error("Integrity check failed") continue ## Check host counters host_counter_checker = host_check.HostCounterCheck() if host_counter_checker.check_no_packet_loss(requester_counter, responder_counter): result_logger.info("Host packet discard counter check passed") else: result_logger.error("Host packet discard counter check failed") continue rdma_verb = orchestrator.traffic_conf['rdma-verb'].lower().strip() if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER: logging.error("Invalid RDMA verb: %s" % rdma_verb) continue ## RDMA READ if rdma_verb == 'read': read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=qp_info_list,
num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if read_gbn_checker.check_all_qps() == True: result_logger.info("READ Go-Back-N state machine check passed for all qps.") else: result_logger.error("READ Go-Back-N state machine check failed") continue gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if gbn_counter_check == True: result_logger.info("READ Go-Back-N counter check passed.") else: result_logger.error("READ Go-Back-N counter check failed") continue ## A mix of RDMA SEND and READ elif rdma_verb == 'send_read': num_qps_send, num_qps_read = [int(x) for x in orchestrator.traffic_conf['num-qps'].split(',')] send_qp_info_list = qp_info_list[0:num_qps_send] read_qp_info_list = qp_info_list[num_qps_send:] send_gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=send_qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) read_gbn_checker = read_gbn_check.ReadGBNCheck(packet_list=packet_list, qp_info_list=read_qp_info_list, num_msgs_per_qp=num_msgs_per_qp, msg_size=msg_size, mtu=mtu) if send_gbn_checker.check_all_qps() == True and read_gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue send_gbn_counter_check = send_gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) read_gbn_counter_check = read_gbn_checker.check_counters(sender_counter=responder_counter, receiver_counter=requester_counter) if send_gbn_counter_check == True and read_gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed for all qps.") else: result_logger.error("Go-Back-N counter check failed") continue else: ## Check the traces and counters against the GBN state machine gbn_checker = gbn_check.GBNCheck(packet_list=packet_list, qp_info_list=qp_info_list, num_data_pkts=math.ceil(msg_size/mtu) * num_msgs_per_qp) if gbn_checker.check_all_qps() == True: result_logger.info("Go-Back-N state machine check passed for all qps.") else: result_logger.error("Go-Back-N state machine check failed") continue gbn_counter_check = gbn_checker.check_counters(sender_counter=requester_counter, receiver_counter=responder_counter) if gbn_counter_check == True: result_logger.info("Go-Back-N counter check passed.") else: result_logger.error("Go-Back-N counter check failed") continue ## Output the latency for undelivered packets num_qps = len(qp_info_list) for qp_index in range(num_qps): is_read = (rdma_verb == 'read') or (rdma_verb == 'send_read' and qp_index >= num_qps_send)
latency_measurement = LatencyMeasure(packet_list=packet_list,
6
2023-12-09 08:21:14+00:00
24k
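The fields above close out one record of the dump: next_line is the gold completion, gold_snippet_index points into the record's context list, created_at and level carry collection metadata (level appears to bucket the context length; here 24k). A minimal sketch of scoring a model against such a record, assuming each record is parsed into a Python dict with exactly these field names (check_completion is a hypothetical helper, not part of any dataset tooling):

def check_completion(record: dict, model_output: str) -> bool:
    # Exact-match scoring: the first non-empty line of the model's
    # output must equal the gold next_line.
    lines = [ln for ln in model_output.splitlines() if ln.strip()]
    predicted = lines[0].strip() if lines else ""
    return predicted == record["next_line"].strip()

record = {"next_line": "latency_measurement = LatencyMeasure(packet_list=packet_list,"}
print(check_completion(record, "latency_measurement = LatencyMeasure(packet_list=packet_list,\n    ..."))  # True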
ebb-earl-co/tidal-wave
tidal_wave/main.py
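The context field that follows is a list of {"identifier", "path", "snippet"} objects: cross-file definitions retrieved from the same repository that the completion may depend on. Assuming gold_snippet_index indexes into this list (an assumption; the dump does not state the mapping explicitly), the gold snippet can be pulled out as sketched here, with gold_snippet being a hypothetical helper:

def gold_snippet(record: dict) -> str:
    # Pick the context entry holding the definition needed to predict
    # next_line (assumed semantics of gold_snippet_index).
    entry = record["context"][record["gold_snippet_index"]]
    return entry["snippet"]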
[ { "identifier": "login", "path": "tidal_wave/login.py", "snippet": "def login(\n audio_format: AudioFormat,\n) -> Tuple[Optional[requests.Session], Optional[AudioFormat]]:\n \"\"\"Given a selected audio_format, either log in \"automatically\"\n via the Fire TV OAuth 2.0 flow, or ask for an Android-/Windows-/MacOS-\n gleaned API token; the latter to be able to access HiRes fLaC audio.\n Returns a tuple of a requests.Session object, if no error, and the\n AudioFormat instance passed in; or (None, \"\") in the event of error.\n \"\"\"\n android_formats: Set[AudioFormat] = {\n AudioFormat.sony_360_reality_audio,\n AudioFormat.hi_res,\n }\n fire_tv_formats: Set[AudioFormat] = {\n AudioFormat.dolby_atmos,\n AudioFormat.mqa,\n AudioFormat.lossless,\n AudioFormat.high,\n AudioFormat.low,\n }\n if audio_format in fire_tv_formats:\n return (login_fire_tv(), audio_format)\n elif audio_format in android_formats:\n options: set = {\"android\", \"a\", \"windows\", \"w\"}\n _input: str = \"\"\n while _input not in options:\n _input = typer.prompt(\n \"For which of Android [a] or Windows [w] would you like to provide an API token?\"\n ).lower()\n else:\n if _input in {\"android\", \"a\"}:\n return (login_android(), audio_format)\n elif _input in {\"windows\", \"w\"}:\n return (login_windows(), audio_format)\n else:\n logger.critical(\n \"Please provide one of the following: \"\n f\"{', '.join(e.value for e in AudioFormat)}\"\n )\n return (None, \"\")" }, { "identifier": "AudioFormat", "path": "tidal_wave/login.py", "snippet": "class AudioFormat(str, Enum):\n sony_360_reality_audio = \"360\"\n dolby_atmos = \"Atmos\"\n hi_res = \"HiRes\"\n mqa = \"MQA\"\n lossless = \"Lossless\"\n high = \"High\"\n low = \"Low\"" }, { "identifier": "LogLevel", "path": "tidal_wave/login.py", "snippet": "class LogLevel(str, Enum):\n debug = \"DEBUG\" # 10\n info = \"INFO\" # 20\n warning = \"WARNING\" # 30\n error = \"ERROR\" # 40\n critical = \"CRITICAL\" # 50" }, { "identifier": "Album", "path": "tidal_wave/album.py", "snippet": "class Album:\n album_id: int\n\n def __post_init__(self):\n self.album_dir: Optional[Path] = None\n self.album_cover_saved: bool = False\n\n def get_items(self, session: Session):\n \"\"\"This method populates self.tracks by requesting from\n TIDAL albums/items endpoint.\"\"\"\n album_items: AlbumsItemsResponseJSON = request_album_items(\n session=session, identifier=self.album_id\n )\n _items = album_items.items if album_items is not None else ()\n self.tracks = tuple(_item.item for _item in _items)\n\n def get_metadata(self, session: Session):\n \"\"\"This method populates self.metadata by requesting from\n TIDAL /albums endpoint\"\"\"\n self.metadata: AlbumsEndpointResponseJSON = request_albums(\n session=session, identifier=self.album_id\n )\n\n def get_review(self, session: Session):\n \"\"\"This method requests the review corresponding to self.album_id\n in TIDAL. 
If it exists, it is written to disk as AlbumReview.json\n in self.album_dir\"\"\"\n self.album_review: Optional[AlbumsReviewResponseJSON] = request_album_review(\n session=session, identifier=self.album_id\n )\n if self.album_review is not None:\n (self.album_dir / \"AlbumReview.json\").write_text(\n self.album_review.to_json()\n )\n\n def set_dir(self, out_dir: Path):\n \"\"\"This method populates self.album_dir as a sub-subdirectory of\n out_dir: its parent directory is the name of the (main) artist of\n the album\"\"\"\n artist_substring: str = self.metadata.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.metadata.name.replace('..', '')} \"\n f\"[{self.metadata.id}] [{self.metadata.release_date.year}]\"\n )\n self.album_dir = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.metadata.number_of_volumes > 1:\n for v in range(1, self.metadata.number_of_volumes + 1):\n volume_substring: str = f\"Volume {v}\"\n (out_dir / artist_substring / album_substring / volume_substring).mkdir(\n parents=True, exist_ok=True\n )\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"This method writes cover.jpg in self.album_dir via the\n utils.download_cover_image() function. If successful,\n then self.album_cover_saved takes the value True\"\"\"\n if self.album_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n download_cover_image(\n session=session,\n cover_uuid=self.metadata.cover,\n output_dir=self.album_dir,\n )\n else:\n self.album_cover_saved = True\n\n def get_tracks(\n self, session: Session, audio_format: AudioFormat, out_dir: Path\n ) -> List[Optional[str]]:\n \"\"\"This method uses self.tracks to call track.Track.get() for each\n track in self.tracks. It uses the result of each of these calls to\n populate self.track_files\"\"\"\n track_files: List[str] = [None] * self.metadata.number_of_tracks\n for i, t in enumerate(self.tracks): # type(t) is TracksEndpointResponseJSON\n track: Track = Track(track_id=t.id)\n\n track_files_value: Optional[str] = track.get(\n session=session,\n audio_format=audio_format,\n out_dir=out_dir,\n metadata=t,\n album=self.metadata,\n )\n track_files[i] = {track.metadata.track_number: track_files_value}\n else:\n self.track_files = track_files\n\n def dumps(self):\n \"\"\"This method returns a JSON-like string of self.track_files\"\"\"\n return json.dumps(self.track_files)\n\n def dump(self, fp=sys.stdout):\n \"\"\"This method writes to (by default) STDOUT a\n JSON-like string of self.track_files\"\"\"\n json.dump(self.track_files, fp)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[AlbumsEndpointResponseJSON] = None,\n ):\n \"\"\"This method is the driver method of the class. It calls the\n other methods in order:\n 1. get_metadata()\n 2. get_items()\n 3. save_cover_image()\n 4. get_review()\n 5. 
get_tracks()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n \n if self.metadata is None:\n self.track_files = {}\n return\n\n self.get_items(session)\n self.save_cover_image(session, out_dir)\n self.get_review(session)\n self.get_tracks(session, audio_format, out_dir)" }, { "identifier": "Artist", "path": "tidal_wave/artist.py", "snippet": "class Artist:\n artist_id: int\n\n def set_metadata(self, session: Session):\n \"\"\"This function requests from TIDAL API endpoint /artists and\n stores the results in self.metadata\"\"\"\n self.metadata: Optional[ArtistsEndpointResponseJSON] = request_artists(\n session, self.artist_id\n )\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes the bytes of self.metadata.picture to\n the file cover.jpg in self.artist_dir\"\"\"\n artist_image: Path = self.artist_dir / \"cover.jpg\"\n if not artist_image.exists():\n download_cover_image(\n session, self.metadata.picture, self.artist_dir, dimension=750\n )\n\n def set_albums(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint /artists/albums and\n stores the results in self.albums\"\"\"\n self.albums: Optional[ArtistsAlbumsResponseJSON] = request_artists_albums(\n session, self.artist_id\n )\n\n def set_audio_works(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint\n /artists/albums?filter=EPSANDSINGLES and stores the results in self.albums\"\"\"\n self.albums: Optional[ArtistsAlbumsResponseJSON] = request_artists_audio_works(\n session, self.artist_id\n )\n\n def set_videos(self, session: Session):\n \"\"\"This method requests from TIDAL API endpoint /artists/videos and\n stores the results in self.albums\"\"\"\n self.videos: Optional[ArtistsVideosResponseJSON] = request_artists_videos(\n session, self.artist_id\n )\n\n def set_dir(self, out_dir: Path):\n \"\"\"This method sets self.artist_dir and creates the directory on the file system\n if it does not exist\"\"\"\n self.name: str = self.metadata.name.replace(\"..\", \"\")\n self.artist_dir = out_dir / self.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def get_albums(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n include_eps_singles: bool = False,\n ) -> List[Optional[str]]:\n \"\"\"This method first fetches the total albums on TIDAL's service\n corresponding to the artist with ID self.artist_id. 
Then, each of\n the albums (and, optionally, EPs and singles) is requested and\n written to subdirectories of out_dir\"\"\"\n if include_eps_singles:\n self.set_audio_works(session)\n logger.info(\n f\"Starting attempt to get {self.albums.total_number_of_items} \"\n \"albums, EPs, and singles for artist with ID \"\n f\"{self.metadata.id}, '{self.name}'\"\n )\n else:\n self.set_albums(session)\n logger.info(\n f\"Starting attempt to get {self.albums.total_number_of_items} albums \"\n f\"for artist with ID {self.metadata.id}, '{self.name}'\"\n )\n\n for i, a in enumerate(self.albums.items):\n album: Album = Album(album_id=a.id)\n album.get(\n session=session,\n audio_format=audio_format,\n out_dir=out_dir,\n metadata=a,\n )\n\n def get_videos(\n self,\n session: Session,\n out_dir: Path,\n ) -> List[Optional[str]]:\n \"\"\"This method sets self.videos by calling self.set_videos()\n then, for each video, instantiates a Video object and executes\n video.get()\"\"\"\n self.set_videos(session)\n logger.info(\n f\"Starting attempt to get {self.videos.total_number_of_items} videos \"\n f\"for artist with ID {self.metadata.id}, '{self.name}'\"\n )\n for i, v in enumerate(self.videos.items):\n video: Video = Video(video_id=v.id)\n video.get(\n session=session,\n out_dir=out_dir,\n metadata=v,\n )\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n include_eps_singles: bool,\n ):\n \"\"\"This is the driver method of the class. It executes the other\n methods in order:\n 1. set_metadata\n 2. set_dir\n 3. save_artist_image\n 4. get_videos\n 5. get_albums\n \"\"\"\n self.set_metadata(session)\n \n if self.metadata is None:\n return\n \n self.set_dir(out_dir)\n self.save_artist_image(session)\n self.get_videos(session, out_dir)\n if include_eps_singles:\n self.get_albums(session, audio_format, out_dir, include_eps_singles=True)\n self.get_albums(session, audio_format, out_dir, include_eps_singles=False)" }, { "identifier": "Mix", "path": "tidal_wave/mix.py", "snippet": "class Mix:\n mix_id: str\n\n def __post_init__(self):\n self.mix_dir: Optional[Path] = None\n self.mix_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /playlists endpoint\"\"\"\n self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_mixes(\n session=session, mix_id=self.mix_id\n )\n \n if self.metadata is None:\n return\n \n self.name = (\n self.metadata.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n .replace(\"..\", \"\")\n )\n\n def set_items(self, session: Session):\n \"\"\"Uses data from TIDAL API /mixes/items endpoint to\n populate self.items\"\"\"\n mix_items: Optional[MixesItemsResponseJSON] = get_mix(\n session=session, mix_id=self.mix_id\n )\n if mix_items is None:\n self.items = tuple()\n else:\n self.items: Tuple[Optional[MixItem]] = tuple(mix_items.items)\n\n def set_dir(self, out_dir: Path):\n \"\"\"Populates self.mix_dir based on self.name, self.mix_id\"\"\"\n mix_substring: str = f\"{self.name} [{self.mix_id}]\"\n self.mix_dir: Path = out_dir / \"Mixes\" / mix_substring\n self.mix_dir.mkdir(parents=True, exist_ok=True)\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"Requests self.metadata.image and attempts to write it to disk\"\"\"\n if self.mix_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.mix_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n with session.get(\n url=self.metadata.image, params={k: None 
for k in session.params}\n ) as r:\n (self.mix_dir / \"cover.jpg\").write_bytes(r.content)\n\n self.mix_cover_saved = True\n else:\n self.mix_cover_saved = True\n\n def get_items(self, session: Session, audio_format: AudioFormat):\n \"\"\"Using either Track.get() or Video.get(), attempt to request\n the data for each track or video in self.items\"\"\"\n if len(self.items) == 0:\n return\n tracks_videos: list = [None] * len(self.items)\n for i, item in enumerate(self.items):\n if item is None:\n tracks_videos[i] = None\n continue\n elif isinstance(item, TracksEndpointResponseJSON):\n track: Track = Track(track_id=item.id)\n track.get(\n session=session,\n audio_format=audio_format,\n out_dir=self.mix_dir,\n metadata=item,\n )\n tracks_videos[i] = track\n elif isinstance(item, VideosEndpointResponseJSON):\n video: Video = Video(video_id=item.id)\n video.get(\n session=session,\n out_dir=self.mix_dir,\n metadata=item,\n )\n tracks_videos[i] = video\n else:\n tracks_videos[i] = None\n continue\n else:\n self.tracks_videos: Tuple[\n Tuple[int, Optional[Union[Track, Video]]]\n ] = tuple(tracks_videos)\n return tracks_videos\n\n def flatten_mix_dir(self):\n \"\"\"When self.get_items() is called, the tracks and/or videos in\n self.items are downloaded using their self-contained .get() logic;\n this means that they will be downloaded to albums. This function\n \"flattens\" self.mix_dir, meaning that it moves all downloaded\n audio and video files to self.mix_dir, and removes the various\n subdirectories created\"\"\"\n files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos)\n if len(self.tracks_videos) == 0:\n return\n subdirs: Set[Path] = set()\n\n for i, tv in enumerate(self.tracks_videos, 1):\n if getattr(tv, \"outfile\") is None:\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n files[i - 1] = {i: None}\n continue\n\n _path: Optional[Path] = Path(tv.outfile) if tv is not None else None\n # if the item never got turned into a track or video\n if _path is None:\n files[i - 1] = {i: None}\n continue\n\n # if the track or video didn't download\n if _path.exists():\n if _path.stat().st_size == 0:\n files[i - 1] = {i: None}\n continue\n else:\n files[i - 1] = {i: None}\n continue\n\n # otherwise, move files and clean up\n if isinstance(tv, Track):\n new_path: Path = self.mix_dir / f\"{i:03d} - {tv.trackname}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n elif isinstance(tv, Video):\n new_path: Path = self.mix_dir / f\"{i:03d} - {_path.name}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n else:\n self.files: List[Dict[int, Optional[str]]] = files\n\n # Find all subdirectories written to\n subdirs: Set[Path] = set()\n for tv in self.tracks_videos:\n if isinstance(tv, Track):\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n elif isinstance(tv, Video):\n subdirs.add(tv.artist_dir)\n\n # Copy all artist images, artist bio JSON files out\n # of subdirs\n artist_images: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*.jpg\"):\n if p.name == \"cover.jpg\":\n continue\n artist_images.add(p)\n else:\n for artist_image_path in artist_images:\n if artist_image_path.exists():\n shutil.copyfile(\n artist_image_path.absolute(),\n self.mix_dir / artist_image_path.name,\n 
)\n\n artist_bios: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*bio.json\"):\n artist_bios.add(p)\n else:\n for artist_bio_path in artist_bios:\n if artist_bio_path.exists():\n shutil.copyfile(\n artist_bio_path.absolute(),\n self.mix_dir / artist_bio_path.name,\n )\n\n # Remove all subdirs\n for subdir in subdirs:\n if subdir.exists():\n shutil.rmtree(subdir)\n else:\n return self.mix_dir\n\n def dumps(self):\n return json.dumps(self.files)\n\n def dump(self, fp=sys.stdout):\n json.dump(self.files, fp)\n\n def get(self, session: Session, audio_format: AudioFormat, out_dir: Path):\n \"\"\"The main method of this class, executing a number of other methods\n in a row:\n - self.get_metadata()\n - self.set_items()\n - self.set_dir()\n - self.save_cover_image()\n - self.get_items()\n - self.flatten_playlist_dir()\n \"\"\"\n self.get_metadata(session)\n \n if self.metadata is None:\n self.files = {}\n return\n \n self.set_items(session)\n self.set_dir(out_dir)\n self.save_cover_image(session, out_dir)\n try:\n self.save_description()\n except Exception:\n pass\n\n _get_items = self.get_items(session, audio_format)\n if _get_items is None:\n logger.critical(f\"Could not retrieve mix with ID '{self.mix_id}'\")\n return\n self.flatten_mix_dir()\n logger.info(f\"Mix files written to '{self.mix_dir}'\")" }, { "identifier": "Playlist", "path": "tidal_wave/playlist.py", "snippet": "class Playlist:\n playlist_id: str # UUID4\n\n def __post_init__(self):\n self.playlist_dir: Optional[Path] = None\n self.playlist_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /playlists endpoint\"\"\"\n self.metadata: Optional[PlaylistsEndpointResponseJSON] = request_playlists(\n session=session, identifier=self.playlist_id\n )\n \n if self.metadata is None:\n return\n \n self.name = (\n self.metadata.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n .replace(\"..\", \"\")\n )\n\n def set_items(self, session: Session):\n \"\"\"Uses data from TIDAL API /playlists/items endpoint to\n populate self.items\"\"\"\n playlist_items: Optional[PlaylistsItemsResponseJSON] = get_playlist(\n session=session, playlist_id=self.playlist_id\n )\n if playlist_items is None:\n self.items = tuple()\n else:\n self.items: Tuple[Optional[PlaylistItem]] = tuple(playlist_items.items)\n\n def set_dir(self, out_dir: Path):\n \"\"\"Populates self.playlist_dir based on self.name, self.playlist_id\"\"\"\n playlist_substring: str = f\"{self.name} [{self.playlist_id}]\"\n self.playlist_dir: Path = out_dir / \"Playlists\" / playlist_substring\n self.playlist_dir.mkdir(parents=True, exist_ok=True)\n\n def save_cover_image(self, session: Session, out_dir: Path):\n \"\"\"Requests self.metadata.image and attempts to write it to disk\"\"\"\n if self.playlist_dir is None:\n self.set_dir(out_dir=out_dir)\n self.cover_path: Path = self.playlist_dir / \"cover.jpg\"\n if not self.cover_path.exists():\n download_cover_image(\n session=session,\n cover_uuid=self.metadata.square_image,\n output_dir=self.playlist_dir,\n dimension=1080,\n )\n else:\n self.playlist_cover_saved = True\n\n def save_description(self):\n \"\"\"Requests self.metadata.description and attempts to write it to disk\"\"\"\n description_path: Path = self.playlist_dir / \"PlaylistDescription.txt\"\n if self.metadata.description is not None and len(self.metadata.description) > 0:\n if not description_path.exists():\n 
description_path.write_text(f\"{self.metadata.description}\\n\")\n\n def get_items(self, session: Session, audio_format: AudioFormat):\n \"\"\"Using either Track.get() or Video.get(), attempt to request\n the data for each track or video in self.items\"\"\"\n if len(self.items) == 0:\n return\n tracks_videos: list = [None] * len(self.items)\n for i, item in enumerate(self.items):\n if item is None:\n tracks_videos[i] = None\n continue\n elif isinstance(item, TracksEndpointResponseJSON):\n track: Track = Track(track_id=item.id)\n track.get(\n session=session,\n audio_format=audio_format,\n out_dir=self.playlist_dir,\n metadata=item,\n )\n tracks_videos[i] = track\n elif isinstance(item, VideosEndpointResponseJSON):\n video: Video = Video(video_id=item.id)\n video.get(\n session=session,\n out_dir=self.playlist_dir,\n metadata=item,\n )\n tracks_videos[i] = video\n else:\n tracks_videos[i] = None\n continue\n else:\n self.tracks_videos: Tuple[\n Tuple[int, Optional[Union[Track, Video]]]\n ] = tuple(tracks_videos)\n return tracks_videos\n\n def flatten_playlist_dir(self):\n \"\"\"When self.get_items() is called, the tracks and/or videos in\n self.items are downloaded using their self-contained .get() logic;\n this means that they will be downloaded to albums. This function\n \"flattens\" self.playlist_dir, meaning that it moves all downloaded\n audio and video files to self.playlist_dir, and removes the various\n subdirectories created\"\"\"\n files: List[Dict[int, Optional[str]]] = [None] * len(self.tracks_videos)\n if len(self.tracks_videos) == 0:\n return\n subdirs: Set[Path] = set()\n\n for i, tv in enumerate(self.tracks_videos, 1):\n if getattr(tv, \"outfile\") is None:\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n files[i - 1] = {i: None}\n continue\n\n _path: Optional[Path] = Path(tv.outfile) if tv is not None else None\n # if the item never got turned into a track or video\n if _path is None:\n files[i - 1] = {i: None}\n continue\n\n # if the track or video didn't download\n if _path.exists():\n if _path.stat().st_size == 0:\n files[i - 1] = {i: None}\n continue\n else:\n files[i - 1] = {i: None}\n continue\n\n # otherwise, move files and clean up\n if isinstance(tv, Track):\n new_path: Path = self.playlist_dir / f\"{i:03d} - {tv.trackname}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n elif isinstance(tv, Video):\n new_path: Path = self.playlist_dir / f\"{i:03d} - {_path.name}\"\n new_path.write_bytes(_path.read_bytes())\n _path.unlink()\n files[i - 1] = {i: str(new_path.absolute())}\n else:\n self.files: List[Dict[int, Optional[str]]] = files\n\n # Find all subdirectories written to\n subdirs: Set[Path] = set()\n for tv in self.tracks_videos:\n if isinstance(tv, Track):\n try:\n getattr(tv, \"album_dir\")\n except AttributeError:\n pass\n else:\n subdirs.add(tv.album_dir)\n subdirs.add(tv.album_dir.parent)\n elif isinstance(tv, Video):\n subdirs.add(tv.artist_dir)\n\n # Copy all artist images, artist bio JSON files out\n # of subdirs\n artist_images: Set[Path] = set()\n for subdir in subdirs:\n for p in subdir.glob(\"*.jpg\"):\n if p.name == \"cover.jpg\":\n continue\n artist_images.add(p)\n else:\n for artist_image_path in artist_images:\n if artist_image_path.exists():\n shutil.copyfile(\n artist_image_path.absolute(),\n self.playlist_dir / artist_image_path.name,\n )\n\n artist_bios: Set[Path] = set()\n for subdir in 
subdirs:\n for p in subdir.glob(\"*bio.json\"):\n artist_bios.add(p)\n else:\n for artist_bio_path in artist_bios:\n if artist_bio_path.exists():\n shutil.copyfile(\n artist_bio_path.absolute(),\n self.playlist_dir / artist_bio_path.name,\n )\n\n # Remove all subdirs\n for subdir in subdirs:\n if subdir.exists():\n shutil.rmtree(subdir)\n else:\n return self.playlist_dir\n\n def craft_m3u8_text(self):\n \"\"\"This method creates a file called playlist.m3u8 in self.playlist_dir\n that is a standard M3U. Needs to be called after self.flatten_playlist_dir\n in order to be able to access self.files\n N.b. the already-written file is temporarily copied to a .mp4 version in a\n temporary directory because .m4a files cannot be read with mutagen.\"\"\"\n m3u_text: str = f\"#EXTM3U\\n#EXTENC:UTF-8\\n#EXTIMG:{str(self.cover_path.absolute())}\\n#PLAYLIST:{self.name}\\n\"\n\n logger.info(\n f\"Creating .m3u8 playlist file for Playlist with ID '{self.playlist_id}'\"\n )\n for d in self.files:\n file: str = next(iter(d.values()))\n if file is None:\n continue\n elif file.endswith(\".flac\"):\n m = mutagen.File(file)\n artist: str = m.get(\"artist\", [\"\"])[0]\n title: str = m.get(\"title\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n elif file.endswith(\".mka\"):\n m = mutagen.File(file)\n artist: str = m.get(\"ARTI\", [\"\"])[0]\n title: str = m.get(\"TITL\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n elif file.endswith(\".m4a\"):\n # Mutagen cannot read .m4a files, so make a copy with all\n # of the metadata tags as a .mp4 in a temporary directory\n with temporary_file(suffix=\".mp4\") as tf:\n ffmpeg.input(file, hide_banner=None, y=None).output(\n tf.name,\n acodec=\"copy\",\n vcodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n m = mutagen.File(tf.name)\n artist: str = m.get(\"\\xa9ART\", [\"\"])[0]\n title: str = m.get(\"\\xa9nam\", [\"\"])[0]\n extinf: str = (\n f\"#EXTINF:{math.ceil(m.info.length)},\"\n f\"{artist} - {title}\\n{file}\\n\"\n )\n m3u_text += extinf\n else:\n return m3u_text\n\n def dumps(self):\n return json.dumps(self.files)\n\n def dump(self, fp=sys.stdout):\n json.dump(self.files, fp)\n\n def get(self, session: Session, audio_format: AudioFormat, out_dir: Path):\n \"\"\"The main method of this class, executing a number of other methods\n in a row:\n - self.get_metadata()\n - self.set_items()\n - self.set_dir()\n - self.save_cover_image()\n - self.save_description()\n - self.get_items()\n - self.flatten_playlist_dir()\n \"\"\"\n self.get_metadata(session)\n \n if self.metadata is None:\n self.files = {}\n return\n \n self.set_items(session)\n self.set_dir(out_dir)\n self.save_cover_image(session, out_dir)\n try:\n self.save_description()\n except Exception:\n pass\n\n _get_items = self.get_items(session, audio_format)\n if _get_items is None:\n logger.critical(f\"Could not retrieve playlist with ID '{self.playlist_id}'\")\n return\n\n self.flatten_playlist_dir()\n\n try:\n m3u8_text: str = self.craft_m3u8_text()\n except Exception as e:\n logger.warning(\n \"Unable to create playlist.m3u8 file for \"\n f\"playlist with ID '{self.playlist_id}'\"\n )\n logger.debug(e)\n else:\n with open(self.playlist_dir / \"playlist.m3u8\", \"w\") as f:\n f.write(m3u8_text)\n\n logger.info(f\"Playlist files written to '{self.playlist_dir}'\")" }, { "identifier": "Track", "path": "tidal_wave/track.py", "snippet": 
"class Track:\n track_id: int\n\n def __post_init__(self):\n self._has_lyrics: Optional[bool] = None\n self.tags: dict = {}\n self.album_cover_saved: bool = False\n\n def get_metadata(self, session: Session):\n self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks(\n session, self.track_id\n )\n\n def get_album(self, session: Session):\n self.album: Optional[AlbumsEndpointResponseJSON] = request_albums(\n session, self.metadata.album.id\n )\n\n def get_credits(self, session: Session):\n self.credits: Optional[TracksCreditsResponseJSON] = request_credits(\n session, self.track_id\n )\n\n def get_lyrics(self, session: Session):\n if self._has_lyrics is None:\n self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics(\n session, self.track_id\n )\n if self.lyrics is None:\n self._has_lyrics = False\n else:\n self._has_lyrics = True\n else:\n return self.lyrics\n\n def get_stream(self, session: Session, audio_format: AudioFormat):\n \"\"\"Populates self.stream, self.manifest\"\"\"\n aq: Optional[str] = af_aq.get(audio_format)\n self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream(\n session, self.track_id, aq\n )\n\n def set_manifest(self):\n \"\"\"This method sets self.manifest and self.codec\"\"\"\n self.manifest: Manifest = manifester(self.stream)\n # https://dashif.org/codecs/audio/\n if self.manifest.codecs == \"flac\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mqa\":\n self.codec = \"flac\"\n elif self.manifest.codecs == \"mha1\": # Sony 360 Reality Audio\n self.codec = \"mka\"\n elif self.manifest.codecs == \"mp4a.40.5\": # HE-AAC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.29\": # HE-AAC v2\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.2\": # AAC-LC\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"eac3\": # Enhanced AC-3\n self.codec = \"m4a\"\n elif self.manifest.codecs == \"mp4a.40.34\": # MP3\n self.codec = \"mp3\"\n\n def set_album_dir(self, out_dir: Path):\n \"\"\"This method sets self.album_dir, based on self.album and\n out_dir. In particular, self.album_dir is a subdirectory of out_dir\n based on the name of the album's artist\"\"\"\n artist_substring: str = self.album.artist.name.replace(\"..\", \"\")\n album_substring: str = (\n f\"{self.album.name} \" f\"[{self.album.id}] [{self.album.release_date.year}]\"\n )\n self.album_dir: Path = out_dir / artist_substring / album_substring\n self.album_dir.mkdir(parents=True, exist_ok=True)\n\n if self.album.number_of_volumes > 1:\n volume_substring: str = f\"Volume {self.metadata.volume_number}\"\n (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, audio_format: AudioFormat):\n \"\"\"This method sets self.filename. It's based on self.metadata\n as well as audio_format. 
Additionally, if the available codecs in\n self.manifest don't match audio_format, warnings are logged\"\"\"\n _track_part: str = f\"{self.metadata.track_number:02d} - {self.metadata.name}\"\n if audio_format == AudioFormat.low:\n track_substring: str = f\"{_track_part} [L]\"\n elif audio_format == AudioFormat.high:\n track_substring: str = f\"{_track_part} [H]\"\n elif audio_format == AudioFormat.lossless:\n track_substring: str = f\"{_track_part} [CD]\"\n elif audio_format == AudioFormat.mqa:\n track_substring: str = f\"{_track_part} [Q]\"\n elif audio_format == AudioFormat.hi_res:\n track_substring: str = f\"{_track_part} [HiRes]\"\n elif audio_format == AudioFormat.dolby_atmos:\n track_substring: str = f\"{_track_part} [A]\"\n elif audio_format == AudioFormat.sony_360_reality_audio:\n track_substring: str = f\"{_track_part} [360]\"\n else:\n track_substring: str = _track_part\n\n # Check for MQA masquerading as HiRes here\n if audio_format == AudioFormat.hi_res:\n if self.manifest.codecs == \"mqa\":\n logger.warning(\n \"Even though HiRes audio format was requested, this track is only \"\n \"available in MQA format. TIDAL regards this as 'HiRes' even though \"\n \"it is probably only lossless; i.e. 16-bit 44.1 kHz quality. \"\n \"Downloading of track will continue, but it will be marked as MQA.\"\n )\n self.filename: Optional[str] = f\"{_track_part} [Q].{self.codec}\"\n elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100):\n logger.warning(\n \"Even though HiRes audio format was requested, and TIDAL responded to \"\n \"that request without error, this track is only available in lossless \"\n \"format; i.e. 16-bit 44.1 kHz quality. Downloading of track will \"\n \"continue, but it will be marked as Lossless ([CD]).\"\n )\n self.filename: Optional[str] = f\"{_track_part} [CD].{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n else:\n self.filename: Optional[str] = f\"{track_substring}.{self.codec}\"\n\n # for use in playlist file ordering\n self.trackname: str = re.match(r\"(?:\\d{2,3} - )(.+?$)\", self.filename).groups()[\n 0\n ]\n\n def set_outfile(self):\n \"\"\"Uses self.album_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n if self.album.number_of_volumes > 1:\n self.outfile: Path = (\n self.album_dir / f\"Volume {self.metadata.volume_number}\" / self.filename\n )\n self.absolute_outfile = str(self.outfile.absolute())\n else:\n self.outfile: Path = self.album_dir / self.filename\n self.absolute_outfile = str(self.outfile.absolute())\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Track {self.absolute_outfile} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def save_artist_image(self, session: Session):\n \"\"\"This method writes a JPEG file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_image: Path = (\n self.album_dir / f\"{a.name.replace('..', '')}.jpg\"\n )\n if not track_artist_image.exists():\n download_artist_image(session, a, self.album_dir)\n\n def save_artist_bio(self, session: Session):\n \"\"\"This method writes a JSON file with the name of each of\n self.metadata.artists to self.album_dir\"\"\"\n for a in self.metadata.artists:\n track_artist_bio_json: Path = self.album_dir / f\"{a.name}-bio.json\"\n if not 
track_artist_bio_json.exists():\n artist_bio: Optional[ArtistsBioResponseJSON] = request_artist_bio(\n session, a.id\n )\n if artist_bio is not None:\n logger.info(\n f\"Writing artist bio for artist {a.id} to \"\n f\"'{str(track_artist_bio_json.absolute())}\"\n )\n track_artist_bio_json.write_text(artist_bio.to_json())\n\n def save_album_cover(self, session: Session):\n \"\"\"This method saves cover.jpg to self.album_dir; the bytes for cover.jpg\n come from self.album.cover\"\"\"\n self.cover_path: Path = self.album_dir / \"cover.jpg\"\n if (not self.cover_path.exists()) or (not self.album_cover_saved):\n download_cover_image(\n session=session, cover_uuid=self.album.cover, output_dir=self.album_dir\n )\n else:\n self.album_cover_saved = True\n\n def set_urls(self, session: Session):\n \"\"\"This method sets self.urls based on self.manifest\"\"\"\n if isinstance(self.manifest, JSONDASHManifest):\n self.urls: List[str] = self.manifest.urls\n elif isinstance(self.manifest, XMLDASHManifest):\n self.urls: List[str] = self.manifest.build_urls(session=session)\n self.download_headers: Dict[str, str] = {\"Accept\": self.manifest.mime_type}\n if session.session_id is not None:\n self.download_headers[\"sessionId\"] = session.session_id\n self.download_params = {k: None for k in session.params}\n\n def download_url(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method downloads self.urls[0], for use in situations when\n the manifest returned by TIDAL API contains one URL. It relies on\n byte range headers to incrementally get all content from a URL\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n # Implement HTTP range requests here to mimic official clients\n range_size: int = 1024 * 1024 # 1 MiB\n content_length: int = fetch_content_length(\n session=session, url=self.urls[0]\n )\n if content_length == 0:\n return\n\n range_headers: Iterable[str] = http_request_range_headers(\n content_length=content_length,\n range_size=range_size,\n return_tuple=False,\n )\n for rh in range_headers:\n with session.get(\n self.urls[0], params=self.download_params, headers={\"Range\": rh}\n ) as rr:\n if not rr.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(rr.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFMPEG to re-mux the audio bytes, otherwise\n # mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile,\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(\n f\"Track {self.track_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def download_urls(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method writes the contents from self.urls to a temporary\n directory, then uses FFmpeg to re-mux the data to self.outfile\"\"\"\n logger.info(f\"Writing track {self.track_id} to '{self.absolute_outfile}'\")\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=self.download_headers, params=self.download_params\n ) as resp:\n if not resp.ok:\n logger.warning(f\"Could not download {self}\")\n return\n else:\n ntf.write(resp.content)\n else:\n ntf.seek(0)\n\n if self.codec == \"flac\":\n # Have to use FFmpeg to re-mux the audio bytes, otherwise\n # 
mutagen chokes on NoFlacHeaderError\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n self.absolute_outfile, acodec=\"copy\", loglevel=\"quiet\"\n ).run()\n elif self.codec == \"m4a\":\n shutil.copyfile(ntf.name, self.outfile)\n elif self.codec == \"mka\":\n shutil.copyfile(ntf.name, self.outfile)\n\n logger.info(f\"Track {self.track_id} written to '{self.absolute_outfile}'\")\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"This method GETs the data from self.urls and writes it\n to self.outfile.\"\"\"\n if len(self.urls) == 1:\n outfile: Optional[Path] = self.download_url(\n session=session, out_dir=out_dir\n )\n else:\n outfile: Optional[Path] = self.download_urls(\n session=session, out_dir=out_dir\n )\n\n return outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary,\n write the correct values of various metadata tags to the file.\n E.g. for .flac files, the album's artist is 'ALBUMARTIST',\n but for .m4a files, the album's artist is 'aART'.\"\"\"\n tags = dict()\n if (self.codec == \"flac\") or (self.codec == \"mka\"):\n tag_map = {k: v[\"flac\"] for k, v in TAG_MAPPING.items()}\n elif self.codec == \"m4a\":\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"album\"]] = self.album.title\n tags[tag_map[\"album_artist\"]] = \";\".join((a.name for a in self.album.artists))\n tags[tag_map[\"album_peak_amplitude\"]] = f\"{self.stream.album_peak_amplitude}\"\n tags[tag_map[\"album_replay_gain\"]] = f\"{self.stream.album_replay_gain}\"\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"barcode\"]] = self.album.upc\n tags[tag_map[\"comment\"]] = self.metadata.url\n tags[tag_map[\"copyright\"]] = self.metadata.copyright\n tags[tag_map[\"date\"]] = str(self.album.release_date)\n tags[tag_map[\"isrc\"]] = self.metadata.isrc\n tags[tag_map[\"title\"]] = self.metadata.name\n tags[tag_map[\"track_peak_amplitude\"]] = f\"{self.metadata.peak}\"\n tags[tag_map[\"track_replay_gain\"]] = f\"{self.metadata.replay_gain}\"\n # credits\n for tag in {\"composer\", \"engineer\", \"lyricist\", \"mixer\", \"producer\", \"remixer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.credits, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n # lyrics\n try:\n _lyrics = self.lyrics.subtitles\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[tag_map[\"lyrics\"]] = _lyrics\n\n if self.codec == \"flac\":\n # track and disk\n tags[\"DISCTOTAL\"] = f\"{self.album.number_of_volumes}\"\n tags[\"DISC\"] = f\"{self.metadata.volume_number}\"\n tags[\"TRACKTOTAL\"] = f\"{self.album.number_of_tracks}\"\n tags[\"TRACKNUMBER\"] = f\"{self.metadata.track_number}\"\n # instrument-specific\n # piano\n try:\n piano_credits: List[str] = [\n f\"{pc} (piano)\" for pc in self.credits.piano\n ]\n except (TypeError, AttributeError): # NoneType problems\n pass\n else:\n tags[\"PERFORMER\"] = piano_credits\n\n elif self.codec == \"m4a\":\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n tags[\"trkn\"] = [(self.metadata.track_number, self.album.number_of_tracks)]\n 
tags[\"disk\"] = [(self.metadata.volume_number, self.album.number_of_volumes)]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n # add album cover\n if self.codec == \"flac\":\n p = mutagen.flac.Picture()\n p.type = mutagen.id3.PictureType.COVER_FRONT\n p.desc = \"Album Cover\"\n p.width = p.height = 1280\n p.mime = \"image/jpeg\"\n p.data = self.cover_path.read_bytes()\n self.mutagen.add_picture(p)\n elif self.codec == \"m4a\":\n self.mutagen[\"covr\"] = [\n MP4Cover(self.cover_path.read_bytes(), imageformat=MP4Cover.FORMAT_JPEG)\n ]\n\n self.mutagen.save()\n # Make sure audio track comes first because of\n # less-sophisticated audio players that only\n # recognize the first stream\n if self.codec == \"flac\":\n with temporary_file(suffix=\".mka\") as tf:\n shutil.move(str(self.outfile.absolute()), tf.name)\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{tf.name}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy\n -metadata:s:v title='Album cover' -metadata:s:v comment='Cover (front)'\n -disposition:v attached_pic \"{self.absolute_outfile}\" \"\"\"\n )\n subprocess.run(cmd)\n elif self.codec == \"m4a\":\n with temporary_file(suffix=\".mka\") as tf:\n cmd: List[str] = shlex.split(\n f\"\"\"ffmpeg -hide_banner -loglevel quiet -y -i \"{self.absolute_outfile}\"\n -map 0:a:0 -map 0:v:0 -c:a copy -c:v copy \"{tf.name}\" \"\"\"\n )\n subprocess.run(cmd)\n shutil.copyfile(tf.name, self.absolute_outfile)\n\n def get(\n self,\n session: Session,\n audio_format: AudioFormat,\n out_dir: Path,\n metadata: Optional[TracksEndpointResponseJSON] = None,\n album: Optional[AlbumsEndpointResponseJSON] = None,\n ) -> Optional[str]:\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n self.outfile = None\n return\n\n if \"DOLBY_ATMOS\" in self.metadata.media_metadata.tags:\n if audio_format != AudioFormat.dolby_atmos:\n logger.warning(\n f\"Track {self.track_id} is only available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if audio_format == AudioFormat.dolby_atmos:\n if \"DOLBY_ATMOS\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Dolby Atmos audio format was requested, but track \"\n f\"{self.track_id} is not available in Dolby Atmos \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.sony_360_reality_audio:\n if \"SONY_360RA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"Sony 360 Reality Audio audio format was requested, but track \"\n f\"{self.track_id} is not available in Sony 360 Reality Audio \"\n \"format. Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n elif audio_format == AudioFormat.mqa:\n if \"MQA\" not in self.metadata.media_metadata.tags:\n logger.warning(\n \"MQA audio format was requested, but track \"\n f\"{self.track_id} is not available in MQA audio \"\n \"format. 
Downloading of track will not continue.\"\n )\n self.outfile = None\n return\n\n if album is None:\n self.get_album(session)\n else:\n self.album = album\n\n if self.album is None:\n self.outfile = None\n return\n\n self.get_credits(session)\n self.get_stream(session, audio_format)\n if self.stream is None:\n return\n self.set_manifest()\n self.set_album_dir(out_dir)\n self.set_filename(audio_format)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return\n\n try:\n self.get_lyrics(session)\n except Exception:\n pass\n\n self.save_album_cover(session)\n\n try:\n self.save_artist_image(session)\n except Exception:\n pass\n\n try:\n self.save_artist_bio(session)\n except Exception:\n pass\n\n self.set_urls(session)\n\n if self.download(session, out_dir) is None:\n return\n\n self.craft_tags()\n self.set_tags()\n\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dump({k: v}, fp)\n return None\n\n def dumps(self) -> str:\n k: int = int(self.metadata.track_number)\n if self.outfile is None:\n v: Optional[str] = None\n elif not isinstance(self.outfile, Path):\n v: Optional[str] = None\n else:\n v: Optional[str] = str(self.outfile.absolute())\n json.dumps({k: v})\n return None" }, { "identifier": "Video", "path": "tidal_wave/video.py", "snippet": "class Video:\n video_id: int\n\n def __post_init__(self):\n self.tags: dict = {}\n self.codec: str = \"mp4\"\n\n def get_metadata(self, session: Session):\n \"\"\"Request from TIDAL API /videos endpoint\"\"\"\n self.metadata: Optional[VideosEndpointResponseJSON] = request_videos(\n session, self.video_id\n )\n\n def get_contributors(self, session: Session):\n \"\"\"Request from TIDAL API /videos/contributors endpoint\"\"\"\n self.contributors: Optional[\n VideosContributorsResponseJSON\n ] = request_video_contributors(session, self.video_id)\n\n def get_stream(self, session: Session, video_format=VideoFormat.high):\n \"\"\"Populates self.stream by requesting from TIDAL API\n /videos/playbackinfopostpaywall endpoint\"\"\"\n self.stream: Optional[VideosEndpointStreamResponseJSON] = request_video_stream(\n session, self.video_id, video_format.value\n )\n\n def get_m3u8(self, session: Session):\n \"\"\"This method sets self.m3u8, an m3u8.M3U8 object\n following the HTTP Live Streaming specification; parsed from\n self.stream. I.e., self.get_stream() needs to have been executed\n before calling this method. N.b. 
self.m3u8 almost certainly will\n be a multivariant playlist, meaning further processing of its\n contents will be necessary.\"\"\"\n self.m3u8: m3u8.Playlist = playlister(session=session, vesrj=self.stream)\n\n def set_urls(self):\n \"\"\"This method uses self.m3u8, an m3u8.M3U8 object that is variant:\n (https://developer.apple.com/documentation/http-live-streaming/creating-a-multivariant-playlist)\n It retrieves the highest-quality .m3u8 in its .playlists attribute,\n and sets self.urls as the list of strings from that m3u8.Playlist\"\"\"\n # for now, just get the highest-bandwidth playlist\n playlist: m3u8.Playlist = variant_streams(self.m3u8)\n self.M3U8 = m3u8.load(playlist.uri)\n if self.M3U8 is None or len(self.M3U8.files) == 0:\n raise TidalM3U8Exception(\n f\"HLS media segments are not available for video {self.video_id}\"\n )\n self.urls: List[str] = self.M3U8.files\n\n def set_artist_dir(self, out_dir: Path):\n \"\"\"Set self.artist_dir, which is the subdirectory of `out_dir`\n with name `self.metadata.artist.name`\"\"\"\n self.artist_dir: Path = out_dir / self.metadata.artist.name\n self.artist_dir.mkdir(parents=True, exist_ok=True)\n\n def set_filename(self, out_dir: Path):\n \"\"\"Set self.filename, which is constructed from self.metadata.name\n and self.stream.video_quality\"\"\"\n self.filename: str = (\n f\"{self.metadata.name} [{self.stream.video_quality}].{self.codec}\"\n )\n\n def set_outfile(self):\n \"\"\"Uses self.artist_dir and self.metadata and self.filename\n to craft the pathlib.Path object, self.outfile, that is a\n reference to where the track will be written on disk.\"\"\"\n self.outfile: Path = self.artist_dir / self.filename\n\n if (self.outfile.exists()) and (self.outfile.stat().st_size > 0):\n logger.info(\n f\"Video {str(self.outfile.absolute())} already exists \"\n \"and therefore will not be overwritten\"\n )\n return\n else:\n return self.outfile\n\n def download(self, session: Session, out_dir: Path) -> Optional[Path]:\n \"\"\"Requests the HLS video files that constitute self.video_id.\n Writes HLS bytes to a temporary file, then uses FFmpeg to write the\n video data to self.outfile\"\"\"\n if session.session_id is not None:\n download_headers: Dict[str, str] = {\"sessionId\": session.session_id}\n else:\n download_headers: dict = dict()\n download_params: Dict[str, None] = {k: None for k in session.params}\n # self.outfile should already have been set by self.set_outfile()\n logger.info(\n f\"Writing video {self.video_id} to '{str(self.outfile.absolute())}'\"\n )\n\n with temporary_file() as ntf:\n for u in self.urls:\n with session.get(\n url=u, headers=download_headers, params=download_params\n ) as download_response:\n if not download_response.ok:\n logger.warning(f\"Could not download {self}\")\n else:\n ntf.write(download_response.content)\n else:\n ntf.seek(0)\n\n # will always be .mp4 because HLS\n ffmpeg.input(ntf.name, hide_banner=None, y=None).output(\n str(self.outfile.absolute()),\n vcodec=\"copy\",\n acodec=\"copy\",\n loglevel=\"quiet\",\n ).run()\n\n logger.info(\n f\"Video {self.video_id} written to '{str(self.outfile.absolute())}'\"\n )\n return self.outfile\n\n def craft_tags(self):\n \"\"\"Using the TAG_MAPPING dictionary, write the correct values of\n various metadata tags to the file. 
Videos are .mp4\"\"\"\n tags = dict()\n tag_map = {k: v[\"m4a\"] for k, v in TAG_MAPPING.items()}\n\n tags[tag_map[\"artist\"]] = \";\".join((a.name for a in self.metadata.artists))\n tags[tag_map[\"artists\"]] = [a.name for a in self.metadata.artists]\n tags[tag_map[\"comment\"]] = f\"https://tidal.com/browse/video/{self.video_id}\"\n tags[tag_map[\"date\"]] = str(self.metadata.release_date.date())\n tags[tag_map[\"title\"]] = self.metadata.title\n\n for tag in {\"composer\", \"director\", \"lyricist\", \"producer\"}:\n try:\n _credits_tag = \";\".join(getattr(self.contributors, tag))\n except (TypeError, AttributeError): # NoneType problems\n continue\n else:\n tags[tag_map[tag]] = _credits_tag\n\n # Have to convert to bytes the values of the tags starting with '----'\n for k, v in tags.copy().items():\n if k.startswith(\"----\"):\n if isinstance(v, str):\n tags[k]: bytes = v.encode(\"UTF-8\")\n elif isinstance(v, list):\n tags[k]: List[bytes] = [s.encode(\"UTF-8\") for s in v]\n\n self.tags: dict = {k: v for k, v in tags.items() if v is not None}\n\n def set_tags(self):\n \"\"\"Instantiate a mutagen.File instance, add self.tags to it, and\n save it to disk\"\"\"\n self.mutagen = mutagen.File(self.outfile)\n self.mutagen.clear()\n self.mutagen.update(**self.tags)\n self.mutagen.save()\n\n def get(\n self,\n session: Session,\n out_dir: Path,\n metadata: Optional[\"VideosEndpointResponseJSON\"] = None,\n ) -> Optional[str]:\n \"\"\"The main method of this class. Executes a number of other methods\n in a row:\n - self.get_metadata()\n - self.get_contributors()\n - self.get_stream()\n - self.get_m3u8()\n - self.set_urls()\n - self.set_artist_dir()\n - self.set_filename()\n - self.set_outfile()\n - self.download()\n - self.craft_tags()\n - self.set_tags()\n \"\"\"\n if metadata is None:\n self.get_metadata(session)\n else:\n self.metadata = metadata\n\n if self.metadata is None:\n return None\n\n self.get_contributors(session)\n self.get_stream(session)\n if self.stream is None:\n return None\n self.get_m3u8(session)\n self.set_urls()\n self.set_artist_dir(out_dir)\n self.set_filename(out_dir)\n outfile: Optional[Path] = self.set_outfile()\n if outfile is None:\n return None\n\n if self.download(session, out_dir) is None:\n return None\n\n self.craft_tags()\n self.set_tags()\n return str(self.outfile.absolute())\n\n def dump(self, fp=sys.stdout):\n json.dump({self.metadata.title: str(self.outfile.absolute())}, fp)\n\n def dumps(self) -> str:\n return json.dumps({self.metadata.title: str(self.outfile.absolute())})" }, { "identifier": "match_tidal_url", "path": "tidal_wave/models.py", "snippet": "def match_tidal_url(input_str: str) -> Optional[TidalResource]:\n \"\"\"Attempt to match the `input_str` to the URL of a supported TIDAL\n resource (track, album, video, playlist, mix, or artist). Returns None\n if `input_str` matches none of them, otherwise a subclass of\n TidalResource corresponding to the parsed input_str type\n \"\"\"\n resource_match: Optional[TidalResource] = None\n tidal_resources: Tuple[TidalResource] = (\n TidalTrack,\n TidalAlbum,\n TidalVideo,\n TidalPlaylist,\n TidalMix,\n TidalArtist,\n )\n for T in tidal_resources:\n try:\n resource_match = T(input_str)\n except ValueError as v:\n logger.debug(v)\n continue\n else:\n return resource_match" }, { "identifier": "TidalAlbum", "path": "tidal_wave/models.py", "snippet": "class TidalAlbum(TidalResource):\n \"\"\"Class representing a TIDAL album. 
Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?album/(\\d{5,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL album URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL album ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalArtist", "path": "tidal_wave/models.py", "snippet": "class TidalArtist(TidalResource):\n \"\"\"Class representing a TIDAL artist. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?artist/(\\d{7,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL artist URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL artist ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalMix", "path": "tidal_wave/models.py", "snippet": "class TidalMix(TidalResource):\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?mix/(\\w{30})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL mix URL\")\n else:\n self.tidal_id = _id\n logger.info(f\"TIDAL mix ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalPlaylist", "path": "tidal_wave/models.py", "snippet": "class TidalPlaylist(TidalResource):\n \"\"\"Class representing a TIDAL playlist. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?playlist/\"\n r\"([0-9a-f]{8}\\-[0-9a-f]{4}\\-4[0-9a-f]{3}\\-[89ab][0-9a-f]{3}\\-[0-9a-f]{12})(?:.*?)?\"\n )\n\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL playlist URL\")\n else:\n self.tidal_id = _id\n logger.info(f\"TIDAL playlist ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalTrack", "path": "tidal_wave/models.py", "snippet": "class TidalTrack(TidalResource):\n \"\"\"Class representing a TIDAL track. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?(?:album/\\d{5,9}/)?track/(\\d{5,9})(?:.*?)?\"\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL track URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL track ID parsed from input: {self.tidal_id}\")" }, { "identifier": "TidalVideo", "path": "tidal_wave/models.py", "snippet": "class TidalVideo(TidalResource):\n \"\"\"Class representing a TIDAL video. Its main purpose is the\n __post_init__ checking process\"\"\"\n\n url: str\n\n def __post_init__(self):\n self.pattern: str = (\n r\"http(?:s)?://(?:listen\\.)?tidal\\.com/(?:browse/)?video/(\\d{7,9})(?:.*?)?\"\n )\n _id = self.match_url()\n\n if _id is None:\n raise ValueError(f\"'{self.url}' is not a valid TIDAL video URL\")\n else:\n self.tidal_id = int(_id)\n logger.info(f\"TIDAL video ID parsed from input: {self.tidal_id}\")" } ]
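Each TidalResource subclass above validates one URL shape in __post_init__, which is what lets match_tidal_url simply try the classes in order until one parses. A small sketch of that dispatch, using URL forms the regex patterns above accept (the IDs are made up):

from tidal_wave.models import match_tidal_url, TidalMix, TidalTrack

resource = match_tidal_url("https://tidal.com/browse/track/141120674")
assert isinstance(resource, TidalTrack) and resource.tidal_id == 141120674

mix = match_tidal_url("https://listen.tidal.com/mix/" + "a" * 30)  # mix IDs are 30 word characters
assert isinstance(mix, TidalMix)

assert match_tidal_url("https://example.com/not-tidal") is None  # no pattern matches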
from contextlib import closing from pathlib import Path from typing import Optional, Union from .login import login, AudioFormat, LogLevel from .album import Album from .artist import Artist from .mix import Mix from .playlist import Playlist from .track import Track from .video import Video from .models import ( match_tidal_url, TidalAlbum, TidalArtist, TidalMix, TidalPlaylist, TidalTrack, TidalVideo, ) from platformdirs import user_music_path from typing_extensions import Annotated import logging import typer
17350
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack): track = Track(track_id=tidal_resource.tidal_id) track.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: track.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalAlbum): album = Album(album_id=tidal_resource.tidal_id) album.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: album.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalArtist):
app = typer.Typer() @app.command() def main( tidal_url: Annotated[ str, typer.Argument( help="The Tidal album or artist or mix or playlist or track or video to download" ), ], audio_format: Annotated[ AudioFormat, typer.Option(case_sensitive=False) ] = AudioFormat.lossless.value, output_directory: Annotated[ Path, typer.Argument( help="The parent directory under which directory(ies) of files will be written" ), ] = user_music_path(), loglevel: Annotated[ LogLevel, typer.Option(case_sensitive=False) ] = LogLevel.info.value, include_eps_singles: Annotated[ bool, typer.Option( "--include-eps-singles", help="No-op unless passing TIDAL artist. Whether to include artist's EPs and singles with albums", ), ] = False, ): logging.basicConfig( format="%(asctime)s,%(msecs)03d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d:%H:%M:%S", level=logging.getLevelName(loglevel.value), ) logger = logging.getLogger(__name__) tidal_resource: Optional[ Union[TidalAlbum, TidalMix, TidalPlaylist, TidalTrack, TidalVideo] ] = match_tidal_url(tidal_url) if tidal_resource is None: logger.critical( f"Cannot parse '{tidal_url}' as a TIDAL album, artist, mix, playlist, track, or video URL" ) raise typer.Exit(code=1) s, audio_format = login(audio_format=audio_format) if s is None: raise typer.Exit(code=1) with closing(s) as session: if isinstance(tidal_resource, TidalTrack): track = Track(track_id=tidal_resource.tidal_id) track.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: track.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalAlbum): album = Album(album_id=tidal_resource.tidal_id) album.get( session=session, audio_format=audio_format, out_dir=output_directory ) if loglevel == LogLevel.debug: album.dump() raise typer.Exit(code=0) elif isinstance(tidal_resource, TidalArtist):
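Because main is registered on a typer app, the CLI can be exercised programmatically; a hedged sketch using Typer's test runner (the URL is illustrative, and a real invocation would still go through the interactive login flow, so this is not a self-contained test):

from typer.testing import CliRunner
from tidal_wave.main import app  # assumes the module above lives at tidal_wave/main.py

runner = CliRunner()
result = runner.invoke(app, ["https://tidal.com/browse/album/99999999", "--loglevel", "debug"])
print(result.exit_code)  # 1 when the URL cannot be parsed or login fails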
artist = Artist(artist_id=tidal_resource.tidal_id)
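The gold completion above opens the TidalArtist branch; a hedged sketch of how that branch plausibly continues, mirroring the track and album branches in cropped_code (the include_eps_singles keyword is an assumption inferred from the CLI option, not confirmed by these fields):

elif isinstance(tidal_resource, TidalArtist):
    artist = Artist(artist_id=tidal_resource.tidal_id)  # == next_line
    # assumed signature, by analogy with Album.get plus the --include-eps-singles flag
    artist.get(
        session=session,
        audio_format=audio_format,
        out_dir=output_directory,
        include_eps_singles=include_eps_singles,
    )
    raise typer.Exit(code=0)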
4
2023-12-12 21:50:25+00:00
24k
ZS-YANG/FemtoDet-v3
mmdet/models/utils/misc.py
[ { "identifier": "SampleList", "path": "mmdet/structures/det_data_sample.py", "snippet": "class DetDataSample(BaseDataElement):\n def proposals(self) -> InstanceData:\n def proposals(self, value: InstanceData):\n def proposals(self):\n def gt_instances(self) -> InstanceData:\n def gt_instances(self, value: InstanceData):\n def gt_instances(self):\n def pred_instances(self) -> InstanceData:\n def pred_instances(self, value: InstanceData):\n def pred_instances(self):\n def pred_track_instances(self) -> InstanceData:\n def pred_track_instances(self, value: InstanceData):\n def pred_track_instances(self):\n def ignored_instances(self) -> InstanceData:\n def ignored_instances(self, value: InstanceData):\n def ignored_instances(self):\n def gt_panoptic_seg(self) -> PixelData:\n def gt_panoptic_seg(self, value: PixelData):\n def gt_panoptic_seg(self):\n def pred_panoptic_seg(self) -> PixelData:\n def pred_panoptic_seg(self, value: PixelData):\n def pred_panoptic_seg(self):\n def gt_sem_seg(self) -> PixelData:\n def gt_sem_seg(self, value: PixelData):\n def gt_sem_seg(self):\n def pred_sem_seg(self) -> PixelData:\n def pred_sem_seg(self, value: PixelData):\n def pred_sem_seg(self):" }, { "identifier": "BaseBoxes", "path": "mmdet/structures/bbox/base_boxes.py", "snippet": "class BaseBoxes(metaclass=ABCMeta):\n \"\"\"The base class for 2D box types.\n\n The functions of ``BaseBoxes`` lie in three fields:\n\n - Verify the boxes shape.\n - Support tensor-like operations.\n - Define abstract functions for 2D boxes.\n\n In ``__init__`` , ``BaseBoxes`` verifies the validity of the data shape\n w.r.t ``box_dim``. The tensor with the dimension >= 2 and the length\n of the last dimension being ``box_dim`` will be regarded as valid.\n ``BaseBoxes`` will restore them at the field ``tensor``. It's necessary\n to override ``box_dim`` in subclass to guarantee the data shape is\n correct.\n\n There are many basic tensor-like functions implemented in ``BaseBoxes``.\n In most cases, users can operate ``BaseBoxes`` instance like a normal\n tensor. To protect the validity of data shape, All tensor-like functions\n cannot modify the last dimension of ``self.tensor``.\n\n When creating a new box type, users need to inherit from ``BaseBoxes``\n and override abstract methods and specify the ``box_dim``. Then, register\n the new box type by using the decorator ``register_box_type``.\n\n Args:\n data (Tensor or np.ndarray or Sequence): The box data with shape\n (..., box_dim).\n dtype (torch.dtype, Optional): data type of boxes. Defaults to None.\n device (str or torch.device, Optional): device of boxes.\n Default to None.\n clone (bool): Whether clone ``boxes`` or not. 
Defaults to True.\n \"\"\"\n\n # Used to verify the last dimension length\n # Should override it in subclass.\n box_dim: int = 0\n\n def __init__(self,\n data: Union[Tensor, np.ndarray, Sequence],\n dtype: Optional[torch.dtype] = None,\n device: Optional[DeviceType] = None,\n clone: bool = True) -> None:\n if isinstance(data, (np.ndarray, Tensor, Sequence)):\n data = torch.as_tensor(data)\n else:\n raise TypeError('boxes should be Tensor, ndarray, or Sequence, '\n f'but got {type(data)}')\n\n if device is not None or dtype is not None:\n data = data.to(dtype=dtype, device=device)\n # Clone the data to avoid potential bugs\n if clone:\n data = data.clone()\n # handle the empty input like []\n if data.numel() == 0:\n data = data.reshape((-1, self.box_dim))\n\n assert data.dim() >= 2 and data.size(-1) == self.box_dim, \\\n ('The boxes dimension must be >= 2 and the length of the last '\n f'dimension must be {self.box_dim}, but got boxes with '\n f'shape {data.shape}.')\n self.tensor = data\n\n def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes':\n \"\"\"Convert self to another box type.\n\n Args:\n dst_type (str or type): destination box type.\n\n Returns:\n :obj:`BaseBoxes`: destination box type object.\n \"\"\"\n from .box_type import convert_box_type\n return convert_box_type(self, dst_type=dst_type)\n\n def empty_boxes(self: T,\n dtype: Optional[torch.dtype] = None,\n device: Optional[DeviceType] = None) -> T:\n \"\"\"Create empty box.\n\n Args:\n dtype (torch.dtype, Optional): data type of boxes.\n device (str or torch.device, Optional): device of boxes.\n\n Returns:\n T: empty boxes with shape of (0, box_dim).\n \"\"\"\n empty_box = self.tensor.new_zeros(\n 0, self.box_dim, dtype=dtype, device=device)\n return type(self)(empty_box, clone=False)\n\n def fake_boxes(self: T,\n sizes: Tuple[int],\n fill: float = 0,\n dtype: Optional[torch.dtype] = None,\n device: Optional[DeviceType] = None) -> T:\n \"\"\"Create fake boxes with specific sizes and fill values.\n\n Args:\n sizes (Tuple[int]): The size of fake boxes. The last value must\n be equal to ``self.box_dim``.\n fill (float): filling value. Defaults to 0.\n dtype (torch.dtype, Optional): data type of boxes.\n device (str or torch.device, Optional): device of boxes.\n\n Returns:\n T: Fake boxes with shape of ``sizes``.\n \"\"\"\n fake_boxes = self.tensor.new_full(\n sizes, fill, dtype=dtype, device=device)\n return type(self)(fake_boxes, clone=False)\n\n def __getitem__(self: T, index: IndexType) -> T:\n \"\"\"Rewrite getitem to protect the last dimension shape.\"\"\"\n boxes = self.tensor\n if isinstance(index, np.ndarray):\n index = torch.as_tensor(index, device=self.device)\n if isinstance(index, Tensor) and index.dtype == torch.bool:\n assert index.dim() < boxes.dim()\n elif isinstance(index, tuple):\n assert len(index) < boxes.dim()\n # `Ellipsis`(...) 
is commonly used in index like [None, ...].\n # When `Ellipsis` is in index, it must be the last item.\n if Ellipsis in index:\n assert index[-1] is Ellipsis\n\n boxes = boxes[index]\n if boxes.dim() == 1:\n boxes = boxes.reshape(1, -1)\n return type(self)(boxes, clone=False)\n\n def __setitem__(self: T, index: IndexType, values: Union[Tensor, T]) -> T:\n \"\"\"Rewrite setitem to protect the last dimension shape.\"\"\"\n assert type(values) is type(self), \\\n 'The value to be set must be the same box type as self'\n values = values.tensor\n\n if isinstance(index, np.ndarray):\n index = torch.as_tensor(index, device=self.device)\n if isinstance(index, Tensor) and index.dtype == torch.bool:\n assert index.dim() < self.tensor.dim()\n elif isinstance(index, tuple):\n assert len(index) < self.tensor.dim()\n # `Ellipsis`(...) is commonly used in index like [None, ...].\n # When `Ellipsis` is in index, it must be the last item.\n if Ellipsis in index:\n assert index[-1] is Ellipsis\n\n self.tensor[index] = values\n\n def __len__(self) -> int:\n \"\"\"Return the length of self.tensor's first dimension.\"\"\"\n return self.tensor.size(0)\n\n def __deepcopy__(self, memo):\n \"\"\"Only clone the ``self.tensor`` when applying deepcopy.\"\"\"\n cls = self.__class__\n other = cls.__new__(cls)\n memo[id(self)] = other\n other.tensor = self.tensor.clone()\n return other\n\n def __repr__(self) -> str:\n \"\"\"Return a string that describes the object.\"\"\"\n return self.__class__.__name__ + '(\\n' + str(self.tensor) + ')'\n\n def new_tensor(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_tensor`` from self.tensor.\"\"\"\n return self.tensor.new_tensor(*args, **kwargs)\n\n def new_full(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_full`` from self.tensor.\"\"\"\n return self.tensor.new_full(*args, **kwargs)\n\n def new_empty(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_empty`` from self.tensor.\"\"\"\n return self.tensor.new_empty(*args, **kwargs)\n\n def new_ones(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_ones`` from self.tensor.\"\"\"\n return self.tensor.new_ones(*args, **kwargs)\n\n def new_zeros(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_zeros`` from self.tensor.\"\"\"\n return self.tensor.new_zeros(*args, **kwargs)\n\n def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]:\n \"\"\"Reload ``size`` from self.tensor.\"\"\"\n # self.tensor.size(dim) cannot work when dim=None.\n return self.tensor.size() if dim is None else self.tensor.size(dim)\n\n def dim(self) -> int:\n \"\"\"Reload ``dim`` from self.tensor.\"\"\"\n return self.tensor.dim()\n\n @property\n def device(self) -> torch.device:\n \"\"\"Reload ``device`` from self.tensor.\"\"\"\n return self.tensor.device\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"Reload ``dtype`` from self.tensor.\"\"\"\n return self.tensor.dtype\n\n @property\n def shape(self) -> torch.Size:\n return self.tensor.shape\n\n def numel(self) -> int:\n \"\"\"Reload ``numel`` from self.tensor.\"\"\"\n return self.tensor.numel()\n\n def numpy(self) -> np.ndarray:\n \"\"\"Reload ``numpy`` from self.tensor.\"\"\"\n return self.tensor.numpy()\n\n def to(self: T, *args, **kwargs) -> T:\n \"\"\"Reload ``to`` from self.tensor.\"\"\"\n return type(self)(self.tensor.to(*args, **kwargs), clone=False)\n\n def cpu(self: T) -> T:\n \"\"\"Reload ``cpu`` from self.tensor.\"\"\"\n return type(self)(self.tensor.cpu(), clone=False)\n\n def cuda(self: T, *args, **kwargs) -> T:\n \"\"\"Reload ``cuda`` from 
self.tensor.\"\"\"\n return type(self)(self.tensor.cuda(*args, **kwargs), clone=False)\n\n def clone(self: T) -> T:\n \"\"\"Reload ``clone`` from self.tensor.\"\"\"\n return type(self)(self.tensor)\n\n def detach(self: T) -> T:\n \"\"\"Reload ``detach`` from self.tensor.\"\"\"\n return type(self)(self.tensor.detach(), clone=False)\n\n def view(self: T, *shape: Tuple[int]) -> T:\n \"\"\"Reload ``view`` from self.tensor.\"\"\"\n return type(self)(self.tensor.view(shape), clone=False)\n\n def reshape(self: T, *shape: Tuple[int]) -> T:\n \"\"\"Reload ``reshape`` from self.tensor.\"\"\"\n return type(self)(self.tensor.reshape(shape), clone=False)\n\n def expand(self: T, *sizes: Tuple[int]) -> T:\n \"\"\"Reload ``expand`` from self.tensor.\"\"\"\n return type(self)(self.tensor.expand(sizes), clone=False)\n\n def repeat(self: T, *sizes: Tuple[int]) -> T:\n \"\"\"Reload ``repeat`` from self.tensor.\"\"\"\n return type(self)(self.tensor.repeat(sizes), clone=False)\n\n def transpose(self: T, dim0: int, dim1: int) -> T:\n \"\"\"Reload ``transpose`` from self.tensor.\"\"\"\n ndim = self.tensor.dim()\n assert dim0 != -1 and dim0 != ndim - 1\n assert dim1 != -1 and dim1 != ndim - 1\n return type(self)(self.tensor.transpose(dim0, dim1), clone=False)\n\n def permute(self: T, *dims: Tuple[int]) -> T:\n \"\"\"Reload ``permute`` from self.tensor.\"\"\"\n assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1\n return type(self)(self.tensor.permute(dims), clone=False)\n\n def split(self: T,\n split_size_or_sections: Union[int, Sequence[int]],\n dim: int = 0) -> List[T]:\n \"\"\"Reload ``split`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim() - 1\n boxes_list = self.tensor.split(split_size_or_sections, dim=dim)\n return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n def chunk(self: T, chunks: int, dim: int = 0) -> List[T]:\n \"\"\"Reload ``chunk`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim() - 1\n boxes_list = self.tensor.chunk(chunks, dim=dim)\n return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n def unbind(self: T, dim: int = 0) -> T:\n \"\"\"Reload ``unbind`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim() - 1\n boxes_list = self.tensor.unbind(dim=dim)\n return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T:\n \"\"\"Reload ``flatten`` from self.tensor.\"\"\"\n assert end_dim != -1 and end_dim != self.tensor.dim() - 1\n return type(self)(self.tensor.flatten(start_dim, end_dim), clone=False)\n\n def squeeze(self: T, dim: Optional[int] = None) -> T:\n \"\"\"Reload ``squeeze`` from self.tensor.\"\"\"\n boxes = self.tensor.squeeze() if dim is None else \\\n self.tensor.squeeze(dim)\n return type(self)(boxes, clone=False)\n\n def unsqueeze(self: T, dim: int) -> T:\n \"\"\"Reload ``unsqueeze`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim()\n return type(self)(self.tensor.unsqueeze(dim), clone=False)\n\n @classmethod\n def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:\n \"\"\"Cancatenates a box instance list into one single box instance.\n Similar to ``torch.cat``.\n\n Args:\n box_list (Sequence[T]): A sequence of box instances.\n dim (int): The dimension over which the box are concatenated.\n Defaults to 0.\n\n Returns:\n T: Concatenated box instance.\n \"\"\"\n assert isinstance(box_list, Sequence)\n if len(box_list) == 0:\n raise ValueError('box_list should not be a empty list.')\n\n 
assert dim != -1 and dim != box_list[0].dim() - 1\n assert all(isinstance(boxes, cls) for boxes in box_list)\n\n th_box_list = [boxes.tensor for boxes in box_list]\n return cls(torch.cat(th_box_list, dim=dim), clone=False)\n\n @classmethod\n def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:\n \"\"\"Concatenates a sequence of tensors along a new dimension. Similar to\n ``torch.stack``.\n\n Args:\n box_list (Sequence[T]): A sequence of box instances.\n dim (int): Dimension to insert. Defaults to 0.\n\n Returns:\n T: Concatenated box instance.\n \"\"\"\n assert isinstance(box_list, Sequence)\n if len(box_list) == 0:\n raise ValueError('box_list should not be an empty list.')\n\n assert dim != -1 and dim != box_list[0].dim()\n assert all(isinstance(boxes, cls) for boxes in box_list)\n\n th_box_list = [boxes.tensor for boxes in box_list]\n return cls(torch.stack(th_box_list, dim=dim), clone=False)\n\n @abstractproperty\n def centers(self) -> Tensor:\n \"\"\"Return a tensor representing the centers of boxes.\"\"\"\n pass\n\n @abstractproperty\n def areas(self) -> Tensor:\n \"\"\"Return a tensor representing the areas of boxes.\"\"\"\n pass\n\n @abstractproperty\n def widths(self) -> Tensor:\n \"\"\"Return a tensor representing the widths of boxes.\"\"\"\n pass\n\n @abstractproperty\n def heights(self) -> Tensor:\n \"\"\"Return a tensor representing the heights of boxes.\"\"\"\n pass\n\n @abstractmethod\n def flip_(self,\n img_shape: Tuple[int, int],\n direction: str = 'horizontal') -> None:\n \"\"\"Flip boxes horizontally or vertically in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n direction (str): Flip direction, options are \"horizontal\",\n \"vertical\" and \"diagonal\". Defaults to \"horizontal\"\n \"\"\"\n pass\n\n @abstractmethod\n def translate_(self, distances: Tuple[float, float]) -> None:\n \"\"\"Translate boxes in-place.\n\n Args:\n distances (Tuple[float, float]): translate distances. The first\n is horizontal distance and the second is vertical distance.\n \"\"\"\n pass\n\n @abstractmethod\n def clip_(self, img_shape: Tuple[int, int]) -> None:\n \"\"\"Clip boxes according to the image shape in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n \"\"\"\n pass\n\n @abstractmethod\n def rotate_(self, center: Tuple[float, float], angle: float) -> None:\n \"\"\"Rotate all boxes in-place.\n\n Args:\n center (Tuple[float, float]): Rotation origin.\n angle (float): Rotation angle represented in degrees. Positive\n values mean clockwise rotation.\n \"\"\"\n pass\n\n @abstractmethod\n def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:\n \"\"\"Geometrically transform boxes in-place.\n\n Args:\n homography_matrix (Tensor or np.ndarray):\n Shape (3, 3) for geometric transformation.\n \"\"\"\n pass\n\n @abstractmethod\n def rescale_(self, scale_factor: Tuple[float, float]) -> None:\n \"\"\"Rescale boxes w.r.t. scale_factor in-place.\n\n Note:\n Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n w.r.t ``scale_factor``. 
The difference is that ``resize_`` only\n changes the width and the height of boxes, but ``rescale_`` also\n rescales the box centers simultaneously.\n\n Args:\n scale_factor (Tuple[float, float]): factors for scaling boxes.\n The length should be 2.\n \"\"\"\n pass\n\n @abstractmethod\n def resize_(self, scale_factor: Tuple[float, float]) -> None:\n \"\"\"Resize the box width and height w.r.t scale_factor in-place.\n\n Note:\n Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n w.r.t ``scale_factor``. The difference is that ``resize_`` only\n changes the width and the height of boxes, but ``rescale_`` also\n rescales the box centers simultaneously.\n\n Args:\n scale_factor (Tuple[float, float]): factors for scaling box\n shapes. The length should be 2.\n \"\"\"\n pass\n\n @abstractmethod\n def is_inside(self,\n img_shape: Tuple[int, int],\n all_inside: bool = False,\n allowed_border: int = 0) -> BoolTensor:\n \"\"\"Find boxes inside the image.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n all_inside (bool): Whether the boxes are all inside the image or\n partly inside the image. Defaults to False.\n allowed_border (int): Boxes that extend beyond the image shape\n boundary by more than ``allowed_border`` are considered\n \"outside\". Defaults to 0.\n Returns:\n BoolTensor: A BoolTensor indicating whether the box is inside\n the image. Assuming the original boxes have shape (m, n, box_dim),\n the output has shape (m, n).\n \"\"\"\n pass\n\n @abstractmethod\n def find_inside_points(self,\n points: Tensor,\n is_aligned: bool = False) -> BoolTensor:\n \"\"\"Find inside box points. Boxes dimension must be 2.\n\n Args:\n points (Tensor): Points coordinates. Has shape of (m, 2).\n is_aligned (bool): Whether ``points`` has been aligned with boxes\n or not. If True, the length of boxes and ``points`` should be\n the same. Defaults to False.\n\n Returns:\n BoolTensor: A BoolTensor indicating whether a point is inside\n boxes. Assuming the boxes have shape of (n, box_dim), if\n ``is_aligned`` is False, the index has shape of (m, n). If\n ``is_aligned`` is True, m should be equal to n and the index has\n shape of (m, ).\n \"\"\"\n pass\n\n @abstractstaticmethod\n def overlaps(boxes1: 'BaseBoxes',\n boxes2: 'BaseBoxes',\n mode: str = 'iou',\n is_aligned: bool = False,\n eps: float = 1e-6) -> Tensor:\n \"\"\"Calculate overlap between two sets of boxes with their types\n converted to the present box type.\n\n Args:\n boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)\n or empty.\n boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)\n or empty.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground). Defaults to \"iou\".\n is_aligned (bool): If True, then m and n must be equal. Defaults\n to False.\n eps (float): A value added to the denominator for numerical\n stability. 
Defaults to 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n \"\"\"\n pass\n\n @abstractstaticmethod\n def from_instance_masks(masks: MaskType) -> 'BaseBoxes':\n \"\"\"Create boxes from instance masks.\n\n Args:\n masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or\n PolygonMasks instance with length of n.\n\n Returns:\n :obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim).\n \"\"\"\n pass" }, { "identifier": "get_box_type", "path": "mmdet/structures/bbox/box_type.py", "snippet": "def get_box_type(box_type: Union[str, type]) -> Tuple[str, type]:\n \"\"\"get both box type name and class.\n\n Args:\n box_type (str or type): Single box type name or class.\n\n Returns:\n Tuple[str, type]: A tuple of box type name and class.\n \"\"\"\n if isinstance(box_type, str):\n type_name = box_type.lower()\n assert type_name in box_types, \\\n f\"Box type {type_name} hasn't been registered in box_types.\"\n type_cls = box_types[type_name]\n elif issubclass(box_type, BaseBoxes):\n assert box_type in _box_type_to_name, \\\n f\"Box type {box_type} hasn't been registered in box_types.\"\n type_name = _box_type_to_name[box_type]\n type_cls = box_type\n else:\n raise KeyError('box_type must be a str or class inheriting from '\n f'BaseBoxes, but got {type(box_type)}.')\n return type_name, type_cls" }, { "identifier": "stack_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def stack_boxes(data_list: List[Union[Tensor, BaseBoxes]],\n dim: int = 0) -> Union[Tensor, BaseBoxes]:\n \"\"\"Stack boxes with type of tensor or box type.\n\n Args:\n data_list (List[Union[Tensor, :obj:`BaseBoxes`]]): A list of tensors\n or box types need to be stacked.\n dim (int): The dimension over which the box are stacked.\n Defaults to 0.\n\n Returns:\n Union[Tensor, :obj`BaseBoxes`]: Stacked results.\n \"\"\"\n if data_list and isinstance(data_list[0], BaseBoxes):\n return data_list[0].stack(data_list, dim=dim)\n else:\n return torch.stack(data_list, dim=dim)" }, { "identifier": "BitmapMasks", "path": "mmdet/structures/mask/structures.py", "snippet": "class BitmapMasks(BaseInstanceMasks):\n \"\"\"This class represents masks in the form of bitmaps.\n\n Args:\n masks (ndarray): ndarray of masks in shape (N, H, W), where N is\n the number of objects.\n height (int): height of masks\n width (int): width of masks\n\n Example:\n >>> from mmdet.data_elements.mask.structures import * # NOQA\n >>> num_masks, H, W = 3, 32, 32\n >>> rng = np.random.RandomState(0)\n >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int64)\n >>> self = BitmapMasks(masks, height=H, width=W)\n\n >>> # demo crop_and_resize\n >>> num_boxes = 5\n >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n >>> out_shape = (14, 14)\n >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n >>> device = 'cpu'\n >>> interpolation = 'bilinear'\n >>> new = self.crop_and_resize(\n ... 
bboxes, out_shape, inds, device, interpolation)\n >>> assert len(new) == num_boxes\n >>> assert new.height, new.width == out_shape\n \"\"\"\n\n def __init__(self, masks, height, width):\n self.height = height\n self.width = width\n if len(masks) == 0:\n self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)\n else:\n assert isinstance(masks, (list, np.ndarray))\n if isinstance(masks, list):\n assert isinstance(masks[0], np.ndarray)\n assert masks[0].ndim == 2 # (H, W)\n else:\n assert masks.ndim == 3 # (N, H, W)\n\n self.masks = np.stack(masks).reshape(-1, height, width)\n assert self.masks.shape[1] == self.height\n assert self.masks.shape[2] == self.width\n\n def __getitem__(self, index):\n \"\"\"Index the BitmapMask.\n\n Args:\n index (int | ndarray): Indices in the format of integer or ndarray.\n\n Returns:\n :obj:`BitmapMasks`: Indexed bitmap masks.\n \"\"\"\n masks = self.masks[index].reshape(-1, self.height, self.width)\n return BitmapMasks(masks, self.height, self.width)\n\n def __iter__(self):\n return iter(self.masks)\n\n def __repr__(self):\n s = self.__class__.__name__ + '('\n s += f'num_masks={len(self.masks)}, '\n s += f'height={self.height}, '\n s += f'width={self.width})'\n return s\n\n def __len__(self):\n \"\"\"Number of masks.\"\"\"\n return len(self.masks)\n\n def rescale(self, scale, interpolation='nearest'):\n \"\"\"See :func:`BaseInstanceMasks.rescale`.\"\"\"\n if len(self.masks) == 0:\n new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)\n else:\n rescaled_masks = np.stack([\n mmcv.imrescale(mask, scale, interpolation=interpolation)\n for mask in self.masks\n ])\n height, width = rescaled_masks.shape[1:]\n return BitmapMasks(rescaled_masks, height, width)\n\n def resize(self, out_shape, interpolation='nearest'):\n \"\"\"See :func:`BaseInstanceMasks.resize`.\"\"\"\n if len(self.masks) == 0:\n resized_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n resized_masks = np.stack([\n mmcv.imresize(\n mask, out_shape[::-1], interpolation=interpolation)\n for mask in self.masks\n ])\n return BitmapMasks(resized_masks, *out_shape)\n\n def flip(self, flip_direction='horizontal'):\n \"\"\"See :func:`BaseInstanceMasks.flip`.\"\"\"\n assert flip_direction in ('horizontal', 'vertical', 'diagonal')\n\n if len(self.masks) == 0:\n flipped_masks = self.masks\n else:\n flipped_masks = np.stack([\n mmcv.imflip(mask, direction=flip_direction)\n for mask in self.masks\n ])\n return BitmapMasks(flipped_masks, self.height, self.width)\n\n def pad(self, out_shape, pad_val=0):\n \"\"\"See :func:`BaseInstanceMasks.pad`.\"\"\"\n if len(self.masks) == 0:\n padded_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n padded_masks = np.stack([\n mmcv.impad(mask, shape=out_shape, pad_val=pad_val)\n for mask in self.masks\n ])\n return BitmapMasks(padded_masks, *out_shape)\n\n def crop(self, bbox):\n \"\"\"See :func:`BaseInstanceMasks.crop`.\"\"\"\n assert isinstance(bbox, np.ndarray)\n assert bbox.ndim == 1\n\n # clip the boundary\n bbox = bbox.copy()\n bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n x1, y1, x2, y2 = bbox\n w = np.maximum(x2 - x1, 1)\n h = np.maximum(y2 - y1, 1)\n\n if len(self.masks) == 0:\n cropped_masks = np.empty((0, h, w), dtype=np.uint8)\n else:\n cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]\n return BitmapMasks(cropped_masks, h, w)\n\n def crop_and_resize(self,\n bboxes,\n out_shape,\n inds,\n device='cpu',\n 
interpolation='bilinear',\n binarize=True):\n \"\"\"See :func:`BaseInstanceMasks.crop_and_resize`.\"\"\"\n if len(self.masks) == 0:\n empty_masks = np.empty((0, *out_shape), dtype=np.uint8)\n return BitmapMasks(empty_masks, *out_shape)\n\n # convert bboxes to tensor\n if isinstance(bboxes, np.ndarray):\n bboxes = torch.from_numpy(bboxes).to(device=device)\n if isinstance(inds, np.ndarray):\n inds = torch.from_numpy(inds).to(device=device)\n\n num_bbox = bboxes.shape[0]\n fake_inds = torch.arange(\n num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]\n rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5\n rois = rois.to(device=device)\n if num_bbox > 0:\n gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(\n 0, inds).to(dtype=rois.dtype)\n targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,\n 1.0, 0, 'avg', True).squeeze(1)\n if binarize:\n resized_masks = (targets >= 0.5).cpu().numpy()\n else:\n resized_masks = targets.cpu().numpy()\n else:\n resized_masks = []\n return BitmapMasks(resized_masks, *out_shape)\n\n def expand(self, expanded_h, expanded_w, top, left):\n \"\"\"See :func:`BaseInstanceMasks.expand`.\"\"\"\n if len(self.masks) == 0:\n expanded_mask = np.empty((0, expanded_h, expanded_w),\n dtype=np.uint8)\n else:\n expanded_mask = np.zeros((len(self), expanded_h, expanded_w),\n dtype=np.uint8)\n expanded_mask[:, top:top + self.height,\n left:left + self.width] = self.masks\n return BitmapMasks(expanded_mask, expanded_h, expanded_w)\n\n def translate(self,\n out_shape,\n offset,\n direction='horizontal',\n border_value=0,\n interpolation='bilinear'):\n \"\"\"Translate the BitmapMasks.\n\n Args:\n out_shape (tuple[int]): Shape for output mask, format (h, w).\n offset (int | float): The offset for translate.\n direction (str): The translate direction, either \"horizontal\"\n or \"vertical\".\n border_value (int | float): Border value. 
Default 0 for masks.\n interpolation (str): Same as :func:`mmcv.imtranslate`.\n\n Returns:\n BitmapMasks: Translated BitmapMasks.\n\n Example:\n >>> from mmdet.data_elements.mask.structures import BitmapMasks\n >>> self = BitmapMasks.random(dtype=np.uint8)\n >>> out_shape = (32, 32)\n >>> offset = 4\n >>> direction = 'horizontal'\n >>> border_value = 0\n >>> interpolation = 'bilinear'\n >>> # Note, There seem to be issues when:\n >>> # * the mask dtype is not supported by cv2.AffineWarp\n >>> new = self.translate(out_shape, offset, direction,\n >>> border_value, interpolation)\n >>> assert len(new) == len(self)\n >>> assert new.height, new.width == out_shape\n \"\"\"\n if len(self.masks) == 0:\n translated_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n masks = self.masks\n if masks.shape[-2:] != out_shape:\n empty_masks = np.zeros((masks.shape[0], *out_shape),\n dtype=masks.dtype)\n min_h = min(out_shape[0], masks.shape[1])\n min_w = min(out_shape[1], masks.shape[2])\n empty_masks[:, :min_h, :min_w] = masks[:, :min_h, :min_w]\n masks = empty_masks\n translated_masks = mmcv.imtranslate(\n masks.transpose((1, 2, 0)),\n offset,\n direction,\n border_value=border_value,\n interpolation=interpolation)\n if translated_masks.ndim == 2:\n translated_masks = translated_masks[:, :, None]\n translated_masks = translated_masks.transpose(\n (2, 0, 1)).astype(self.masks.dtype)\n return BitmapMasks(translated_masks, *out_shape)\n\n def shear(self,\n out_shape,\n magnitude,\n direction='horizontal',\n border_value=0,\n interpolation='bilinear'):\n \"\"\"Shear the BitmapMasks.\n\n Args:\n out_shape (tuple[int]): Shape for output mask, format (h, w).\n magnitude (int | float): The magnitude used for shear.\n direction (str): The shear direction, either \"horizontal\"\n or \"vertical\".\n border_value (int | tuple[int]): Value used in case of a\n constant border.\n interpolation (str): Same as in :func:`mmcv.imshear`.\n\n Returns:\n BitmapMasks: The sheared masks.\n \"\"\"\n if len(self.masks) == 0:\n sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n sheared_masks = mmcv.imshear(\n self.masks.transpose((1, 2, 0)),\n magnitude,\n direction,\n border_value=border_value,\n interpolation=interpolation)\n if sheared_masks.ndim == 2:\n sheared_masks = sheared_masks[:, :, None]\n sheared_masks = sheared_masks.transpose(\n (2, 0, 1)).astype(self.masks.dtype)\n return BitmapMasks(sheared_masks, *out_shape)\n\n def rotate(self,\n out_shape,\n angle,\n center=None,\n scale=1.0,\n border_value=0,\n interpolation='bilinear'):\n \"\"\"Rotate the BitmapMasks.\n\n Args:\n out_shape (tuple[int]): Shape for output mask, format (h, w).\n angle (int | float): Rotation angle in degrees. Positive values\n mean counter-clockwise rotation.\n center (tuple[float], optional): Center point (w, h) of the\n rotation in source image. If not specified, the center of\n the image will be used.\n scale (int | float): Isotropic scale factor.\n border_value (int | float): Border value. 
Default 0 for masks.\n interpolation (str): Same as in :func:`mmcv.imrotate`.\n\n Returns:\n BitmapMasks: Rotated BitmapMasks.\n \"\"\"\n if len(self.masks) == 0:\n rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)\n else:\n rotated_masks = mmcv.imrotate(\n self.masks.transpose((1, 2, 0)),\n angle,\n center=center,\n scale=scale,\n border_value=border_value,\n interpolation=interpolation)\n if rotated_masks.ndim == 2:\n # case when only one mask, (h, w)\n rotated_masks = rotated_masks[:, :, None] # (h, w, 1)\n rotated_masks = rotated_masks.transpose(\n (2, 0, 1)).astype(self.masks.dtype)\n return BitmapMasks(rotated_masks, *out_shape)\n\n @property\n def areas(self):\n \"\"\"See :py:attr:`BaseInstanceMasks.areas`.\"\"\"\n return self.masks.sum((1, 2))\n\n def to_ndarray(self):\n \"\"\"See :func:`BaseInstanceMasks.to_ndarray`.\"\"\"\n return self.masks\n\n def to_tensor(self, dtype, device):\n \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n return torch.tensor(self.masks, dtype=dtype, device=device)\n\n @classmethod\n def random(cls,\n num_masks=3,\n height=32,\n width=32,\n dtype=np.uint8,\n rng=None):\n \"\"\"Generate random bitmap masks for demo / testing purposes.\n\n Example:\n >>> from mmdet.data_elements.mask.structures import BitmapMasks\n >>> self = BitmapMasks.random()\n >>> print('self = {}'.format(self))\n self = BitmapMasks(num_masks=3, height=32, width=32)\n \"\"\"\n from mmdet.utils.util_random import ensure_rng\n rng = ensure_rng(rng)\n masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)\n self = cls(masks, height=height, width=width)\n return self\n\n @classmethod\n def cat(cls: Type[T], masks: Sequence[T]) -> T:\n \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n Args:\n masks (Sequence[BitmapMasks]): A sequence of mask instances.\n\n Returns:\n BitmapMasks: Concatenated mask instance.\n \"\"\"\n assert isinstance(masks, Sequence)\n if len(masks) == 0:\n raise ValueError('masks should not be an empty list.')\n assert all(isinstance(m, cls) for m in masks)\n\n mask_array = np.concatenate([m.masks for m in masks], axis=0)\n return cls(mask_array, *mask_array.shape[1:])" }, { "identifier": "PolygonMasks", "path": "mmdet/structures/mask/structures.py", "snippet": "class PolygonMasks(BaseInstanceMasks):\n \"\"\"This class represents masks in the form of polygons.\n\n Polygons is a list of three levels. 
The first level of the list\n corresponds to objects, the second level to the polys that compose the\n object, the third level to the poly coordinates\n\n Args:\n masks (list[list[ndarray]]): The first level of the list\n corresponds to objects, the second level to the polys that\n compose the object, the third level to the poly coordinates\n height (int): height of masks\n width (int): width of masks\n\n Example:\n >>> from mmdet.data_elements.mask.structures import * # NOQA\n >>> masks = [\n >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]\n >>> ]\n >>> height, width = 16, 16\n >>> self = PolygonMasks(masks, height, width)\n\n >>> # demo translate\n >>> new = self.translate((16, 16), 4., direction='horizontal')\n >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])\n >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)\n\n >>> # demo crop_and_resize\n >>> num_boxes = 3\n >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n >>> out_shape = (16, 16)\n >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n >>> device = 'cpu'\n >>> interpolation = 'bilinear'\n >>> new = self.crop_and_resize(\n ... bboxes, out_shape, inds, device, interpolation)\n >>> assert len(new) == num_boxes\n >>> assert new.height, new.width == out_shape\n \"\"\"\n\n def __init__(self, masks, height, width):\n assert isinstance(masks, list)\n if len(masks) > 0:\n assert isinstance(masks[0], list)\n assert isinstance(masks[0][0], np.ndarray)\n\n self.height = height\n self.width = width\n self.masks = masks\n\n def __getitem__(self, index):\n \"\"\"Index the polygon masks.\n\n Args:\n index (ndarray | List): The indices.\n\n Returns:\n :obj:`PolygonMasks`: The indexed polygon masks.\n \"\"\"\n if isinstance(index, np.ndarray):\n if index.dtype == bool:\n index = np.where(index)[0].tolist()\n else:\n index = index.tolist()\n if isinstance(index, list):\n masks = [self.masks[i] for i in index]\n else:\n try:\n masks = self.masks[index]\n except Exception:\n raise ValueError(\n f'Unsupported input of type {type(index)} for indexing!')\n if len(masks) and isinstance(masks[0], np.ndarray):\n masks = [masks] # ensure a list of three levels\n return PolygonMasks(masks, self.height, self.width)\n\n def __iter__(self):\n return iter(self.masks)\n\n def __repr__(self):\n s = self.__class__.__name__ + '('\n s += f'num_masks={len(self.masks)}, '\n s += f'height={self.height}, '\n s += f'width={self.width})'\n return s\n\n def __len__(self):\n \"\"\"Number of masks.\"\"\"\n return len(self.masks)\n\n def rescale(self, scale, interpolation=None):\n \"\"\"see :func:`BaseInstanceMasks.rescale`\"\"\"\n new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n if len(self.masks) == 0:\n rescaled_masks = PolygonMasks([], new_h, new_w)\n else:\n rescaled_masks = self.resize((new_h, new_w))\n return rescaled_masks\n\n def resize(self, out_shape, interpolation=None):\n \"\"\"see :func:`BaseInstanceMasks.resize`\"\"\"\n if len(self.masks) == 0:\n resized_masks = PolygonMasks([], *out_shape)\n else:\n h_scale = out_shape[0] / self.height\n w_scale = out_shape[1] / self.width\n resized_masks = []\n for poly_per_obj in self.masks:\n resized_poly = []\n for p in poly_per_obj:\n p = p.copy()\n p[0::2] = p[0::2] * w_scale\n p[1::2] = p[1::2] * h_scale\n resized_poly.append(p)\n resized_masks.append(resized_poly)\n resized_masks = PolygonMasks(resized_masks, *out_shape)\n return resized_masks\n\n def flip(self, flip_direction='horizontal'):\n \"\"\"see :func:`BaseInstanceMasks.flip`\"\"\"\n assert 
flip_direction in ('horizontal', 'vertical', 'diagonal')\n if len(self.masks) == 0:\n flipped_masks = PolygonMasks([], self.height, self.width)\n else:\n flipped_masks = []\n for poly_per_obj in self.masks:\n flipped_poly_per_obj = []\n for p in poly_per_obj:\n p = p.copy()\n if flip_direction == 'horizontal':\n p[0::2] = self.width - p[0::2]\n elif flip_direction == 'vertical':\n p[1::2] = self.height - p[1::2]\n else:\n p[0::2] = self.width - p[0::2]\n p[1::2] = self.height - p[1::2]\n flipped_poly_per_obj.append(p)\n flipped_masks.append(flipped_poly_per_obj)\n flipped_masks = PolygonMasks(flipped_masks, self.height,\n self.width)\n return flipped_masks\n\n def crop(self, bbox):\n \"\"\"see :func:`BaseInstanceMasks.crop`\"\"\"\n assert isinstance(bbox, np.ndarray)\n assert bbox.ndim == 1\n\n # clip the boundary\n bbox = bbox.copy()\n bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n x1, y1, x2, y2 = bbox\n w = np.maximum(x2 - x1, 1)\n h = np.maximum(y2 - y1, 1)\n\n if len(self.masks) == 0:\n cropped_masks = PolygonMasks([], h, w)\n else:\n # reference: https://github.com/facebookresearch/fvcore/blob/main/fvcore/transforms/transform.py # noqa\n crop_box = geometry.box(x1, y1, x2, y2).buffer(0.0)\n cropped_masks = []\n # suppress shapely warnings until it incorporates GEOS>=3.11.2\n # reference: https://github.com/shapely/shapely/issues/1345\n initial_settings = np.seterr()\n np.seterr(invalid='ignore')\n for poly_per_obj in self.masks:\n cropped_poly_per_obj = []\n for p in poly_per_obj:\n p = p.copy()\n p = geometry.Polygon(p.reshape(-1, 2)).buffer(0.0)\n # polygon must be valid to perform intersection.\n if not p.is_valid:\n continue\n cropped = p.intersection(crop_box)\n if cropped.is_empty:\n continue\n if isinstance(cropped,\n geometry.collection.BaseMultipartGeometry):\n cropped = cropped.geoms\n else:\n cropped = [cropped]\n # one polygon may be cropped to multiple ones\n for poly in cropped:\n # ignore lines or points\n if not isinstance(\n poly, geometry.Polygon) or not poly.is_valid:\n continue\n coords = np.asarray(poly.exterior.coords)\n # remove an extra identical vertex at the end\n coords = coords[:-1]\n coords[:, 0] -= x1\n coords[:, 1] -= y1\n cropped_poly_per_obj.append(coords.reshape(-1))\n # a dummy polygon to avoid misalignment between masks and boxes\n if len(cropped_poly_per_obj) == 0:\n cropped_poly_per_obj = [np.array([0, 0, 0, 0, 0, 0])]\n cropped_masks.append(cropped_poly_per_obj)\n np.seterr(**initial_settings)\n cropped_masks = PolygonMasks(cropped_masks, h, w)\n return cropped_masks\n\n def pad(self, out_shape, pad_val=0):\n \"\"\"padding has no effect on polygons\"\"\"\n return PolygonMasks(self.masks, *out_shape)\n\n def expand(self, *args, **kwargs):\n \"\"\"TODO: Add expand for polygon\"\"\"\n raise NotImplementedError\n\n def crop_and_resize(self,\n bboxes,\n out_shape,\n inds,\n device='cpu',\n interpolation='bilinear',\n binarize=True):\n \"\"\"see :func:`BaseInstanceMasks.crop_and_resize`\"\"\"\n out_h, out_w = out_shape\n if len(self.masks) == 0:\n return PolygonMasks([], out_h, out_w)\n\n if not binarize:\n raise ValueError('Polygons are always binary, '\n 'setting binarize=False is unsupported')\n\n resized_masks = []\n for i in range(len(bboxes)):\n mask = self.masks[inds[i]]\n bbox = bboxes[i, :]\n x1, y1, x2, y2 = bbox\n w = np.maximum(x2 - x1, 1)\n h = np.maximum(y2 - y1, 1)\n h_scale = out_h / max(h, 0.1) # avoid too large scale\n w_scale = out_w / max(w, 0.1)\n\n resized_mask = []\n for p in 
mask:\n p = p.copy()\n # crop\n # pycocotools will clip the boundary\n p[0::2] = p[0::2] - bbox[0]\n p[1::2] = p[1::2] - bbox[1]\n\n # resize\n p[0::2] = p[0::2] * w_scale\n p[1::2] = p[1::2] * h_scale\n resized_mask.append(p)\n resized_masks.append(resized_mask)\n return PolygonMasks(resized_masks, *out_shape)\n\n def translate(self,\n out_shape,\n offset,\n direction='horizontal',\n border_value=None,\n interpolation=None):\n \"\"\"Translate the PolygonMasks.\n\n Example:\n >>> self = PolygonMasks.random(dtype=np.int64)\n >>> out_shape = (self.height, self.width)\n >>> new = self.translate(out_shape, 4., direction='horizontal')\n >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])\n >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501\n \"\"\"\n assert border_value is None or border_value == 0, \\\n 'Here border_value is not '\\\n f'used, and by default should be None or 0, got {border_value}.'\n if len(self.masks) == 0:\n translated_masks = PolygonMasks([], *out_shape)\n else:\n translated_masks = []\n for poly_per_obj in self.masks:\n translated_poly_per_obj = []\n for p in poly_per_obj:\n p = p.copy()\n if direction == 'horizontal':\n p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])\n elif direction == 'vertical':\n p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])\n translated_poly_per_obj.append(p)\n translated_masks.append(translated_poly_per_obj)\n translated_masks = PolygonMasks(translated_masks, *out_shape)\n return translated_masks\n\n def shear(self,\n out_shape,\n magnitude,\n direction='horizontal',\n border_value=0,\n interpolation='bilinear'):\n \"\"\"See :func:`BaseInstanceMasks.shear`.\"\"\"\n if len(self.masks) == 0:\n sheared_masks = PolygonMasks([], *out_shape)\n else:\n sheared_masks = []\n if direction == 'horizontal':\n shear_matrix = np.stack([[1, magnitude],\n [0, 1]]).astype(np.float32)\n elif direction == 'vertical':\n shear_matrix = np.stack([[1, 0], [magnitude,\n 1]]).astype(np.float32)\n for poly_per_obj in self.masks:\n sheared_poly = []\n for p in poly_per_obj:\n p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]\n new_coords = np.matmul(shear_matrix, p) # [2, n]\n new_coords[0, :] = np.clip(new_coords[0, :], 0,\n out_shape[1])\n new_coords[1, :] = np.clip(new_coords[1, :], 0,\n out_shape[0])\n sheared_poly.append(\n new_coords.transpose((1, 0)).reshape(-1))\n sheared_masks.append(sheared_poly)\n sheared_masks = PolygonMasks(sheared_masks, *out_shape)\n return sheared_masks\n\n def rotate(self,\n out_shape,\n angle,\n center=None,\n scale=1.0,\n border_value=0,\n interpolation='bilinear'):\n \"\"\"See :func:`BaseInstanceMasks.rotate`.\"\"\"\n if len(self.masks) == 0:\n rotated_masks = PolygonMasks([], *out_shape)\n else:\n rotated_masks = []\n rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)\n for poly_per_obj in self.masks:\n rotated_poly = []\n for p in poly_per_obj:\n p = p.copy()\n coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]\n # pad 1 to convert from format [x, y] to homogeneous\n # coordinates format [x, y, 1]\n coords = np.concatenate(\n (coords, np.ones((coords.shape[0], 1), coords.dtype)),\n axis=1) # [n, 3]\n rotated_coords = np.matmul(\n rotate_matrix[None, :, :],\n coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]\n rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,\n out_shape[1])\n rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,\n out_shape[0])\n rotated_poly.append(rotated_coords.reshape(-1))\n rotated_masks.append(rotated_poly)\n rotated_masks = 
PolygonMasks(rotated_masks, *out_shape)\n return rotated_masks\n\n def to_bitmap(self):\n \"\"\"convert polygon masks to bitmap masks.\"\"\"\n bitmap_masks = self.to_ndarray()\n return BitmapMasks(bitmap_masks, self.height, self.width)\n\n @property\n def areas(self):\n \"\"\"Compute areas of masks.\n\n This func is modified from `detectron2\n <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.\n The function only works with Polygons using the shoelace formula.\n\n Return:\n ndarray: areas of each instance\n \"\"\" # noqa: W501\n area = []\n for polygons_per_obj in self.masks:\n area_per_obj = 0\n for p in polygons_per_obj:\n area_per_obj += self._polygon_area(p[0::2], p[1::2])\n area.append(area_per_obj)\n return np.asarray(area)\n\n def _polygon_area(self, x, y):\n \"\"\"Compute the area of a component of a polygon.\n\n Using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Args:\n x (ndarray): x coordinates of the component\n y (ndarray): y coordinates of the component\n\n Return:\n float: the area of the component\n \"\"\" # noqa: 501\n return 0.5 * np.abs(\n np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n def to_ndarray(self):\n \"\"\"Convert masks to the format of ndarray.\"\"\"\n if len(self.masks) == 0:\n return np.empty((0, self.height, self.width), dtype=np.uint8)\n bitmap_masks = []\n for poly_per_obj in self.masks:\n bitmap_masks.append(\n polygon_to_bitmap(poly_per_obj, self.height, self.width))\n return np.stack(bitmap_masks)\n\n def to_tensor(self, dtype, device):\n \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n if len(self.masks) == 0:\n return torch.empty((0, self.height, self.width),\n dtype=dtype,\n device=device)\n ndarray_masks = self.to_ndarray()\n return torch.tensor(ndarray_masks, dtype=dtype, device=device)\n\n @classmethod\n def random(cls,\n num_masks=3,\n height=32,\n width=32,\n n_verts=5,\n dtype=np.float32,\n rng=None):\n \"\"\"Generate random polygon masks for demo / testing purposes.\n\n Adapted from [1]_\n\n References:\n .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501\n\n Example:\n >>> from mmdet.data_elements.mask.structures import PolygonMasks\n >>> self = PolygonMasks.random()\n >>> print('self = {}'.format(self))\n \"\"\"\n from mmdet.utils.util_random import ensure_rng\n rng = ensure_rng(rng)\n\n def _gen_polygon(n, irregularity, spikeyness):\n \"\"\"Creates the polygon by sampling points on a circle around the\n centre. Random noise is added by varying the angular spacing\n between sequential points, and by varying the radial distance of\n each point from the centre.\n\n Based on original code by Mike Ounsworth\n\n Args:\n n (int): number of vertices\n irregularity (float): [0,1] indicating how much variance there\n is in the angular spacing of vertices. [0,1] will map to\n [0, 2pi/numberOfVerts]\n spikeyness (float): [0,1] indicating how much variance there is\n in each vertex from the circle of radius aveRadius.
[0,1]\n will map to [0, aveRadius]\n\n Returns:\n a list of vertices, in CCW order.\n \"\"\"\n from scipy.stats import truncnorm\n\n # Generate around the unit circle\n cx, cy = (0.0, 0.0)\n radius = 1\n\n tau = np.pi * 2\n\n irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n\n spikeyness = np.clip(spikeyness, 1e-9, 1)\n\n # generate n angle steps\n lower = (tau / n) - irregularity\n upper = (tau / n) + irregularity\n angle_steps = rng.uniform(lower, upper, n)\n\n # normalize the steps so that point 0 and point n+1 are the same\n k = angle_steps.sum() / (2 * np.pi)\n angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)\n\n # Convert high and low values to be wrt the standard normal range\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html\n low = 0\n high = 2 * radius\n mean = radius\n std = spikeyness\n a = (low - mean) / std\n b = (high - mean) / std\n tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)\n\n # now generate the points\n radii = tnorm.rvs(n, random_state=rng)\n x_pts = cx + radii * np.cos(angles)\n y_pts = cy + radii * np.sin(angles)\n\n points = np.hstack([x_pts[:, None], y_pts[:, None]])\n\n # Scale to 0-1 space\n points = points - points.min(axis=0)\n points = points / points.max(axis=0)\n\n # Randomly place within 0-1 space\n points = points * (rng.rand() * .8 + .2)\n min_pt = points.min(axis=0)\n max_pt = points.max(axis=0)\n\n high = (1 - max_pt)\n low = (0 - min_pt)\n offset = (rng.rand(2) * (high - low)) + low\n points = points + offset\n return points\n\n def _order_vertices(verts):\n \"\"\"\n References:\n https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise\n \"\"\"\n mlat = verts.T[0].sum() / len(verts)\n mlng = verts.T[1].sum() / len(verts)\n\n tau = np.pi * 2\n angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +\n tau) % tau\n sortx = angle.argsort()\n verts = verts.take(sortx, axis=0)\n return verts\n\n # Generate a random exterior for each requested mask\n masks = []\n for _ in range(num_masks):\n exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))\n exterior = (exterior * [(width, height)]).astype(dtype)\n masks.append([exterior.ravel()])\n\n self = cls(masks, height, width)\n return self\n\n @classmethod\n def cat(cls: Type[T], masks: Sequence[T]) -> T:\n \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n Args:\n masks (Sequence[PolygonMasks]): A sequence of mask instances.\n\n Returns:\n PolygonMasks: Concatenated mask instance.\n \"\"\"\n assert isinstance(masks, Sequence)\n if len(masks) == 0:\n raise ValueError('masks should not be an empty list.')\n assert all(isinstance(m, cls) for m in masks)\n\n mask_list = list(itertools.chain(*[m.masks for m in masks]))\n return cls(mask_list, masks[0].height, masks[0].width)" }, { "identifier": "OptInstanceList", "path": "mmdet/utils/typing_utils.py", "snippet": "" } ]
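The `PolygonMasks.areas` property in the context snippet above sums per-component areas computed with the shoelace formula in `_polygon_area`. As a quick standalone illustration (not part of the dataset record; `verts` is a hypothetical array of ordered (x, y) vertices):

import numpy as np

def shoelace_area(x, y):
    # 0.5 * |dot(x, roll(y, 1)) - dot(y, roll(x, 1))|, exactly the
    # vectorized shoelace expression used by PolygonMasks._polygon_area
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# unit square in CCW order -> area 1.0
verts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
assert np.isclose(shoelace_area(verts[:, 0], verts[:, 1]), 1.0)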
from functools import partial from typing import List, Optional, Sequence, Tuple, Union from mmengine.structures import InstanceData from mmengine.utils import digit_version from six.moves import map, zip from torch import Tensor from torch.autograd import Function from torch.nn import functional as F from mmdet.structures import SampleList from mmdet.structures.bbox import BaseBoxes, get_box_type, stack_boxes from mmdet.structures.mask import BitmapMasks, PolygonMasks from mmdet.utils import OptInstanceList import numpy as np import torch
17,513
def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then ``instance_results`` will be returned Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. It currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to wrap boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 is for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and maps the multiple outputs of the ``func`` into different lists. Each list contains the same type of outputs corresponding to different inputs. Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple lists, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of items (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray.
Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """
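For context on the `multi_apply` helper in the cropped code above: it maps a function over parallel input lists and transposes the per-input result tuples into per-output lists. A small hypothetical usage sketch (the function name `square_and_cube` is illustrative only):

def square_and_cube(x, scale=1):
    return x * x * scale, x * x * x * scale

squares, cubes = multi_apply(square_and_cube, [1, 2, 3], scale=2)
# map(partial(func, scale=2), [1, 2, 3]) yields (2, 2), (8, 16), (18, 54);
# zip(*...) transposes them, so squares == [2, 8, 18] and cubes == [2, 16, 54]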
# Copyright (c) OpenMMLab. All rights reserved. class SigmoidGeometricMean(Function): """Forward and backward function of geometric mean of two sigmoid functions. This implementation with analytical gradient function substitutes the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The original implementation incurs NaN during gradient backpropagation if both x and y are very small values. """ @staticmethod def forward(ctx, x, y): x_sigmoid = x.sigmoid() y_sigmoid = y.sigmoid() z = (x_sigmoid * y_sigmoid).sqrt() ctx.save_for_backward(x_sigmoid, y_sigmoid, z) return z @staticmethod def backward(ctx, grad_output): x_sigmoid, y_sigmoid, z = ctx.saved_tensors grad_x = grad_output * z * (1 - x_sigmoid) / 2 grad_y = grad_output * z * (1 - y_sigmoid) / 2 return grad_x, grad_y sigmoid_geometric_mean = SigmoidGeometricMean.apply def interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` to the shape of the `target`. The `source` must be a Tensor, but the `target` can be a Tensor or a np.ndarray with the shape (..., target_h, target_w). Args: source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or (N, C, H, W). target (Tensor | np.ndarray): The interpolation target with the shape (..., target_h, target_w). mode (str): Algorithm used for interpolation. The options are the same as those in F.interpolate(). Default: ``'bilinear'``. align_corners (bool): The same as the argument in F.interpolate(). Returns: Tensor: The interpolated source Tensor. """ assert len(target.shape) >= 2 def _interpolate_as(source, target, mode='bilinear', align_corners=False): """Interpolate the `source` (4D) to the shape of the `target`.""" target_h, target_w = target.shape[-2:] source_h, source_w = source.shape[-2:] if target_h != source_h or target_w != source_w: source = F.interpolate( source, size=(target_h, target_w), mode=mode, align_corners=align_corners) return source if len(source.shape) == 3: source = source[:, None, :, :] source = _interpolate_as(source, target, mode, align_corners) return source[:, 0, :, :] else: return _interpolate_as(source, target, mode, align_corners) def unpack_gt_instances(batch_data_samples: SampleList) -> tuple: """Unpack ``gt_instances``, ``gt_instances_ignore`` and ``img_metas`` based on ``batch_data_samples`` Args: batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. Returns: tuple: - batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. - batch_gt_instances_ignore (list[:obj:`InstanceData`]): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. - batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc.
""" batch_gt_instances = [] batch_gt_instances_ignore = [] batch_img_metas = [] for data_sample in batch_data_samples: batch_img_metas.append(data_sample.metainfo) batch_gt_instances.append(data_sample.gt_instances) if 'ignored_instances' in data_sample: batch_gt_instances_ignore.append(data_sample.ignored_instances) else: batch_gt_instances_ignore.append(None) return batch_gt_instances, batch_gt_instances_ignore, batch_img_metas def empty_instances(batch_img_metas: List[dict], device: torch.device, task_type: str, instance_results: OptInstanceList = None, mask_thr_binary: Union[int, float] = 0, box_type: Union[str, type] = 'hbox', use_box_type: bool = False, num_classes: int = 80, score_per_cls: bool = False) -> List[InstanceData]: """Handle predicted instances when RoI is empty. Note: If ``instance_results`` is not None, it will be modified in place internally, and then return ``instance_results`` Args: batch_img_metas (list[dict]): List of image information. device (torch.device): Device of tensor. task_type (str): Expected returned task type. it currently supports bbox and mask. instance_results (list[:obj:`InstanceData`]): List of instance results. mask_thr_binary (int, float): mask binarization threshold. Defaults to 0. box_type (str or type): The empty box type. Defaults to `hbox`. use_box_type (bool): Whether to warp boxes with the box type. Defaults to False. num_classes (int): num_classes of bbox_head. Defaults to 80. score_per_cls (bool): Whether to generate classwise score for the empty instance. ``score_per_cls`` will be True when the model needs to produce raw results without nms. Defaults to False. Returns: list[:obj:`InstanceData`]: Detection results of each image """ assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \ f' but got {task_type}' if instance_results is not None: assert len(instance_results) == len(batch_img_metas) results_list = [] for img_id in range(len(batch_img_metas)): if instance_results is not None: results = instance_results[img_id] assert isinstance(results, InstanceData) else: results = InstanceData() if task_type == 'bbox': _, box_type = get_box_type(box_type) bboxes = torch.zeros(0, box_type.box_dim, device=device) if use_box_type: bboxes = box_type(bboxes, clone=False) results.bboxes = bboxes score_shape = (0, num_classes + 1) if score_per_cls else (0, ) results.scores = torch.zeros(score_shape, device=device) results.labels = torch.zeros((0, ), device=device, dtype=torch.long) else: # TODO: Handle the case where rescale is false img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2] # the type of `im_mask` will be torch.bool or torch.uint8, # where uint8 if for visualization and debugging. im_mask = torch.zeros( 0, img_h, img_w, device=device, dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8) results.masks = im_mask results_list.append(results) return results_list def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. 
Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple lists, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of items (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray. Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """
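The `SigmoidGeometricMean` function in the full file above exists for numerical robustness: autograd through `(x.sigmoid() * y.sigmoid()).sqrt()` backpropagates a 1 / (2 * sqrt(u)) factor that explodes when the product underflows, while the analytic gradients grad_x = z * (1 - sigmoid(x)) / 2 stay finite. A brief sanity check, assuming the definitions above are importable:

x = torch.tensor([-10.0, 0.0, 3.0], requires_grad=True)
y = torch.tensor([-10.0, 0.0, -3.0], requires_grad=True)

z = sigmoid_geometric_mean(x, y)              # analytic backward
z_naive = (x.sigmoid() * y.sigmoid()).sqrt()  # autograd through sqrt
assert torch.allclose(z, z_naive)             # forward passes agree

z.sum().backward()  # gradients remain finite even for very negative inputs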
if isinstance(mask, (BitmapMasks, PolygonMasks)):
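For orientation, the next_line above is the first branch of `mask2ndarray`; a hedged sketch of how the function plausibly continues (the actual mmdet body may differ):

def mask2ndarray_sketch(mask):
    if isinstance(mask, (BitmapMasks, PolygonMasks)):
        return mask.to_ndarray()           # both mask types expose to_ndarray()
    if isinstance(mask, torch.Tensor):
        return mask.detach().cpu().numpy()
    if isinstance(mask, np.ndarray):
        return mask
    raise TypeError(f'Unsupported {type(mask)} data type')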
5
2023-12-11 15:23:03+00:00
24k
chinhsuanwu/ifusion
model/zero123.py
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras, map_location=model.device)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n\n _module._modules[name] = _tmp\n\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n _module._modules[name].lora_down.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n\n _module._modules[name].lora_up.weight.requires_grad = True if not eval else False\n _module._modules[name].lora_down.weight.requires_grad = True if not eval else False\n names.append(name)\n\n return require_grad_params, names" }, { "identifier": "monkeypatch_remove_lora", "path": "ldm/lora.py", "snippet": "def monkeypatch_remove_lora(model):\n for _module, name, _child_module in _find_modules(\n model, search_class=[LoraInjectedLinear, LoraInjectedConv2d]\n ):\n if isinstance(_child_module, LoraInjectedLinear):\n _source = _child_module.linear\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Linear(\n _source.in_features, _source.out_features, bias is not None\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n else:\n _source = _child_module.conv\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Conv2d(\n in_channels=_source.in_channels,\n out_channels=_source.out_channels,\n kernel_size=_source.kernel_size,\n stride=_source.stride,\n padding=_source.padding,\n dilation=_source.dilation,\n groups=_source.groups,\n bias=bias is not None,\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n _module._modules[name] = _tmp" }, { "identifier": "save_lora_weight", "path": "ldm/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n):\n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float16))\n weights.append(_down.weight.to(\"cpu\").to(torch.float16))\n\n 
torch.save(weights, path)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(\n self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image_cond\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n unet_trainable=True,\n *args,\n **kwargs,\n ):\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs[\"timesteps\"]\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = \"concat\" if concat_mode else \"crossattn\"\n if cond_stage_config == \"__is_unconditional__\":\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.unet_trainable = unet_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer(\"scale_factor\", torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n\n # construct linear projection layer for concatenating image CLIP embedding and RT\n self.cc_projection = nn.Linear(772, 768)\n nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])\n nn.init.zeros_(list(self.cc_projection.parameters())[1])\n self.cc_projection.requires_grad_(True)\n\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n\n def make_cond_schedule(\n self,\n ):\n self.cond_ids = torch.full(\n size=(self.num_timesteps,),\n fill_value=self.num_timesteps - 1,\n dtype=torch.long,\n )\n ids = torch.round(\n torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)\n ).long()\n self.cond_ids[: self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if (\n self.scale_by_std\n and self.current_epoch == 0\n and self.global_step == 0\n and batch_idx == 0\n and not self.restarted_from_ckpt\n ):\n assert (\n self.scale_factor == 1.0\n ), \"rather not use custom rescaling and std-rescaling simultaneously\"\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer(\"scale_factor\", 1.0 / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n super().register_schedule(\n given_betas, beta_schedule, timesteps, linear_start, linear_end, 
cosine_s\n )\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != \"__is_first_stage__\"\n assert config != \"__is_unconditional__\"\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(\n self, samples, desc=\"\", force_no_decoder_quantization=False\n ):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(\n self.decode_first_stage(\n zd.to(self.device), force_not_quantize=force_no_decoder_quantization\n )\n )\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, \"n b c h w -> b n c h w\")\n denoise_grid = rearrange(denoise_grid, \"b n c h w -> (b n) c h w\")\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(\n f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\"\n )\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, \"encode\") and callable(\n self.cond_stage_model.encode\n ):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n with min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(\n torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1\n )[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(\n weighting,\n
self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"],\n )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(\n L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"],\n )\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(\n self, x, kernel_size, stride, uf=1, df=1\n ): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(\n kernel_size[0], kernel_size[1], Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1,\n padding=0,\n stride=(stride[0] * uf, stride[1] * uf),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h * uf, w * uf\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)\n )\n\n elif df > 1 and uf == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1,\n padding=0,\n stride=(stride[0] // df, stride[1] // df),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h // df, w // df\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)\n )\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(\n self,\n batch,\n k,\n return_first_stage_outputs=False,\n force_c_encode=False,\n cond_key=None,\n return_original_cond=False,\n bs=None,\n uncond=0.05,\n ):\n x = super().get_input(batch, k)\n T = batch[\"T\"].to(memory_format=torch.contiguous_format).float()\n\n if bs is not None:\n x = x[:bs]\n T = T[:bs].to(self.device)\n\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n cond_key = cond_key or self.cond_stage_key\n xc = super().get_input(batch, cond_key).to(self.device)\n if bs is not 
None:\n xc = xc[:bs]\n cond = {}\n\n # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.\n random = torch.rand(x.size(0), device=x.device)\n prompt_mask = rearrange(random < 2 * uncond, \"n -> n 1 1\")\n input_mask = 1 - rearrange(\n (random >= uncond).float() * (random < 3 * uncond).float(), \"n -> n 1 1 1\"\n )\n null_prompt = self.get_learned_conditioning([\"\"])\n\n # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768]\n # print('=========== xc shape ===========', xc.shape)\n with torch.enable_grad():\n clip_emb = self.get_learned_conditioning(xc).detach()\n null_prompt = self.get_learned_conditioning([\"\"]).detach()\n cond[\"c_crossattn\"] = [\n self.cc_projection(\n torch.cat(\n [\n torch.where(prompt_mask, null_prompt, clip_emb),\n T[:, None, :],\n ],\n dim=-1,\n )\n )\n ]\n cond[\"c_concat\"] = [\n input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()\n ]\n out = [z, cond]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_original_cond:\n out.append(xc)\n return out\n\n # @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n uf = self.split_input_params[\"vqf\"]\n bs, nc, h, w = z.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n z, ks, stride, uf=uf\n )\n\n z = unfold(z) # (bn, nc * prod(**ks), L)\n # 1. Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n # 2. apply model loop over last dim\n if isinstance(self.first_stage_model, VQModelInterface):\n output_list = [\n self.first_stage_model.decode(\n z[:, :, :, :, i],\n force_not_quantize=predict_cids or force_not_quantize,\n )\n for i in range(z.shape[-1])\n ]\n else:\n output_list = [\n self.first_stage_model.decode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)\n o = o * weighting\n # Reverse 1. 
reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization # norm is shape (1, 1, h, w)\n return decoded\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n df = self.split_input_params[\"vqf\"]\n self.split_input_params[\"original_image_size\"] = x.shape[-2:]\n bs, nc, h, w = x.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x, ks, stride, df=df\n )\n z = unfold(x) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n output_list = [\n self.first_stage_model.encode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization\n return decoded\n\n else:\n return self.first_stage_model.encode(x)\n else:\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, step_ratio=None, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c, step_ratio=step_ratio)\n return loss\n\n def forward(self, x, c, step_ratio=None, *args, **kwargs):\n if step_ratio is not None:\n t = np.round((1 - step_ratio) * self.num_timesteps).clip(0, self.num_timesteps - 1)\n t = torch.full((x.shape[0],), t, dtype=torch.long, device=self.device)\n else:\n t = torch.randint(\n 0, self.num_timesteps, (x.shape[0],), device=self.device\n ).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset\n def rescale_bbox(bbox):\n x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])\n y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])\n w = min(bbox[2] / crop_coordinates[2], 1 - x0)\n h = min(bbox[3] / crop_coordinates[3], 1 - y0)\n return x0, y0, w, h\n\n return [rescale_bbox(b) for b in bboxes]\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = (\n \"c_concat\" if
self.model.conditioning_key == \"concat\" else \"c_crossattn\"\n )\n cond = {key: cond}\n\n if hasattr(self, \"split_input_params\"):\n assert len(cond) == 1 # todo can only deal with one conditioning atm\n assert not return_ids\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n\n h, w = x_noisy.shape[-2:]\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x_noisy, ks, stride\n )\n\n z = unfold(x_noisy) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]\n\n if (\n self.cond_stage_key in [\"image\", \"LR_image\", \"segmentation\", \"bbox_img\"]\n and self.model.conditioning_key\n ): # todo check for completeness\n c_key = next(iter(cond.keys())) # get key\n c = next(iter(cond.values())) # get value\n assert len(c) == 1 # todo extend to list with more than one elem\n c = c[0] # get element\n\n c = unfold(c)\n c = c.view(\n (c.shape[0], -1, ks[0], ks[1], c.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]\n\n elif self.cond_stage_key == \"coordinates_bbox\":\n assert (\n \"original_image_size\" in self.split_input_params\n ), \"BoundingBoxRescaling is missing original_image_size\"\n\n # assuming padding of unfold is always 0 and its dilation is always 1\n n_patches_per_row = int((w - ks[0]) / stride[0] + 1)\n full_img_h, full_img_w = self.split_input_params[\"original_image_size\"]\n # as we are operating on latents, we need the factor from the original image size to the\n # spatial latent size to properly rescale the crops for regenerating the bbox annotations\n num_downs = self.first_stage_model.encoder.num_resolutions - 1\n rescale_latent = 2 ** (num_downs)\n\n # get top left positions of patches as conforming for the bbox tokenizer, therefore we\n # need to rescale the tl patch coordinates to be in between (0,1)\n tl_patch_coordinates = [\n (\n rescale_latent\n * stride[0]\n * (patch_nr % n_patches_per_row)\n / full_img_w,\n rescale_latent\n * stride[1]\n * (patch_nr // n_patches_per_row)\n / full_img_h,\n )\n for patch_nr in range(z.shape[-1])\n ]\n\n # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)\n patch_limits = [\n (\n x_tl,\n y_tl,\n rescale_latent * ks[0] / full_img_w,\n rescale_latent * ks[1] / full_img_h,\n )\n for x_tl, y_tl in tl_patch_coordinates\n ]\n # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]\n\n # tokenize crop coordinates for the bounding boxes of the respective patches\n patch_limits_tknzd = [\n torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(\n self.device\n )\n for bbox in patch_limits\n ] # list of length l with tensors of shape (1, 2)\n # cut tknzd crop position from conditioning\n assert isinstance(cond, dict), \"cond must be dict to be fed into model\"\n cut_cond = cond[\"c_crossattn\"][0][..., :-2].to(self.device)\n\n adapted_cond = torch.stack(\n [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]\n )\n adapted_cond = rearrange(adapted_cond, \"l b n -> (l b) n\")\n adapted_cond = self.get_learned_conditioning(adapted_cond)\n adapted_cond = rearrange(\n adapted_cond, \"(l b) n d -> l b n d\", l=z.shape[-1]\n )\n\n cond_list = [{\"c_crossattn\": [e]} for e in adapted_cond]\n\n else:\n cond_list = [\n cond for i in
range(z.shape[-1])\n ] # Todo make this more efficient\n\n # apply model by loop over crops\n output_list = [\n self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])\n ]\n assert not isinstance(\n output_list[0], tuple\n ) # todo cant deal with multiple model outputs check this never happens\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n x_recon = fold(o) / normalization\n\n else:\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = \"train\" if self.training else \"val\"\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f\"{prefix}/loss_simple\": loss_simple.mean()})\n\n if self.logvar.device != self.device:\n self.logvar = self.logvar.to(self.device)\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f\"{prefix}/loss_gamma\": loss.mean()})\n loss_dict.update({\"logvar\": self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f\"{prefix}/loss_vlb\": loss_vlb})\n loss += self.original_elbo_weight * loss_vlb\n loss_dict.update({f\"{prefix}/loss\": loss})\n\n return loss, loss_dict\n\n def p_mean_variance(\n self,\n x,\n c,\n t,\n clip_denoised: bool,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(\n self, model_out, x, t, c, **corrector_kwargs\n )\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif 
self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(\n self,\n x,\n c,\n t,\n clip_denoised=False,\n repeat_noise=False,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(\n x=x,\n c=c,\n t=t,\n clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (\n 0.5 * model_log_variance\n ).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return (\n model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise,\n x0,\n )\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(\n self,\n cond,\n shape,\n verbose=True,\n callback=None,\n quantize_denoised=False,\n img_callback=None,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n batch_size=None,\n x_T=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(\n reversed(range(0, timesteps)),\n desc=\"Progressive Generation\",\n total=timesteps,\n )\n if verbose\n else reversed(range(0, timesteps))\n )\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = 
self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n return_x0=True,\n temperature=temperature[i],\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(\n self,\n cond,\n shape,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(reversed(range(0, timesteps)), desc=\"Sampling t\", total=timesteps)\n if verbose\n else reversed(range(0, timesteps))\n )\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n )\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(\n self,\n cond,\n batch_size=16,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n shape=None,\n **kwargs,\n ):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n return self.p_sample_loop(\n cond,\n shape,\n return_intermediates=return_intermediates,\n x_T=x_T,\n verbose=verbose,\n timesteps=timesteps,\n quantize_denoised=quantize_denoised,\n mask=mask,\n x0=x0,\n )\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(\n ddim_steps, batch_size, shape, cond, verbose=False, **kwargs\n )\n\n else:\n samples, intermediates = self.sample(\n cond=cond, batch_size=batch_size, 
return_intermediates=True, **kwargs\n )\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(\n self, batch_size, null_label=None, image_size=512\n ):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n # todo: get null label from cond_stage_model\n raise NotImplementedError()\n c = repeat(c, \"1 ... -> b ...\", b=batch_size).to(self.device)\n cond = {}\n cond[\"c_crossattn\"] = [c]\n cond[\"c_concat\"] = [\n torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(\n self.device\n )\n ]\n return cond\n\n @torch.no_grad()\n def log_images(\n self,\n batch,\n N=8,\n n_row=4,\n sample=True,\n ddim_steps=200,\n ddim_eta=1.0,\n return_keys=None,\n quantize_denoised=True,\n inpaint=True,\n plot_denoise_rows=False,\n plot_progressive_rows=True,\n plot_diffusion_rows=True,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs,\n ):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(\n batch,\n self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N,\n )\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[self.cond_stage_key],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif self.cond_stage_key == \"class_label\":\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[\"human_label\"],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), \"1 -> b\", b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, \"n b c h w -> b n c h w\")\n diffusion_grid = rearrange(diffusion_grid, \"b n c h w -> (b n) c h w\")\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if (\n quantize_denoised\n and not 
isinstance(self.first_stage_model, AutoencoderKL)\n and not isinstance(self.first_stage_model, IdentityFirstStage)\n ):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n quantize_denoised=True,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(\n N, unconditional_guidance_label, image_size=x.shape[-1]\n )\n # uc = torch.zeros_like(c)\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[\n f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"\n ] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1.0 - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(\n c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N,\n )\n prog_row = self._get_denoise_row_from_list(\n progressives, desc=\"Progressive Generation\"\n )\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = []\n if self.unet_trainable == \"attn\":\n print(\"Training only unet attention layers\")\n for n, m in self.model.named_modules():\n if isinstance(m, CrossAttention) and n.endswith(\"attn2\"):\n params.extend(m.parameters())\n if self.unet_trainable == \"conv_in\":\n print(\"Training only unet input conv layers\")\n params = list(self.model.diffusion_model.input_blocks[0][0].parameters())\n elif self.unet_trainable is True or self.unet_trainable == \"all\":\n print(\"Training the full unet\")\n params = list(self.model.parameters())\n else:\n raise ValueError(\n f\"Unrecognised setting for unet_trainable: {self.unet_trainable}\"\n )\n\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print(\"Diffusion model 
optimizing logvar\")\n params.append(self.logvar)\n\n if self.cc_projection is not None:\n params = params + list(self.cc_projection.parameters())\n print(\"========== optimizing for cc projection weight ==========\")\n\n opt = torch.optim.AdamW(\n [\n {\"params\": self.model.parameters(), \"lr\": lr},\n {\"params\": self.cc_projection.parameters(), \"lr\": 10.0 * lr},\n ],\n lr=lr,\n )\n if self.use_scheduler:\n assert \"target\" in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n \"scheduler\": LambdaLR(opt, lr_lambda=scheduler.schedule),\n \"interval\": \"step\",\n \"frequency\": 1,\n }\n ]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "load_model_from_config", "path": "ldm/util.py", "snippet": "def load_model_from_config(config, ckpt, device, vram_O=False, verbose=False):\n print(f\"[INFO] Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n\n if \"global_step\" in pl_sd and verbose:\n print(f'[INFO] Global Step: {pl_sd[\"global_step\"]}')\n\n sd = pl_sd[\"state_dict\"]\n\n model = instantiate_from_config(config.model)\n m, u = model.load_state_dict(sd, strict=False)\n\n if len(m) > 0 and verbose:\n print(\"[INFO] Missing keys: \\n\", m)\n if len(u) > 0 and verbose:\n print(\"[INFO] Unexpected keys: \\n\", u)\n\n # manually load ema and delete it to save GPU memory\n if model.use_ema:\n if verbose:\n print(\"[INFO] Loading EMA\")\n model.model_ema.copy_to(model.model)\n del model.model_ema\n\n if vram_O:\n # we don't need decoder\n del model.first_stage_model.decoder\n\n torch.cuda.empty_cache()\n model.eval().to(device)\n\n return model" }, { "identifier": "make_T", "path": "util/pose.py", "snippet": "def make_T(theta, azimuth, distance, in_deg=False):\n if in_deg:\n theta, azimuth = theta.deg2rad(), azimuth.deg2rad()\n return torch.stack(\n (\n theta,\n torch.sin(azimuth),\n torch.cos(azimuth),\n distance,\n )\n )" }, { "identifier": "default", "path": "util/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" } ]
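The `log_images` snippet in the context above drives sampling through `unconditional_guidance_scale` together with a zeroed conditioning dict from `get_unconditional_conditioning`. As a reading aid, here is a minimal sketch of the classifier-free guidance rule that scale controls; `eps_model` is a hypothetical stand-in for the denoiser, not this repository's API.

import torch

def guided_eps(eps_model, x_t, t, cond, uncond, scale):
    # Predict noise with and without conditioning, then push the estimate
    # away from the unconditional prediction by `scale`.
    eps_cond = eps_model(x_t, t, cond)
    eps_uncond = eps_model(x_t, t, uncond)
    return eps_uncond + scale * (eps_cond - eps_uncond)

With scale == 1.0 this collapses to the plain conditional prediction, which is why the snippet only takes the guided branch when the scale exceeds 1.0.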
import itertools
import torch
import torch.nn as nn
from dataclasses import dataclass
from diffusers import DDIMScheduler
from einops import rearrange
from omegaconf import OmegaConf
from ldm.lora import (
    inject_trainable_lora_extended,
    monkeypatch_remove_lora,
    save_lora_weight,
)
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import load_model_from_config
from util.pose import make_T
from util.typing import *
from util.util import default
15,836
self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ):
class Zero123(nn.Module): @dataclass class Config: pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt" pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml" vram_O: bool = False min_step_percent: float = 0.02 max_step_percent: float = 0.98 config: Config def __init__(self, **kwargs) -> None: super().__init__() self.config = OmegaConf.structured(self.Config(**kwargs)) self.device = "cuda" self.require_grad_params = [] self.configure() def configure(self) -> None: print("[INFO] Loading Zero123...") self.pretrained_config = OmegaConf.load(self.config.pretrained_config) self.weights_dtype = torch.float32 self.model: LatentDiffusion = load_model_from_config( self.pretrained_config, self.config.pretrained_model_name_or_path, device=self.device, vram_O=self.config.vram_O, ) for p in self.model.parameters(): p.requires_grad_(False) self.num_train_timesteps = self.pretrained_config.model.params.timesteps self.scheduler = DDIMScheduler( self.num_train_timesteps, self.pretrained_config.model.params.linear_start, self.pretrained_config.model.params.linear_end, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, steps_offset=1, ) self.num_train_timesteps = self.scheduler.config.num_train_timesteps self.set_min_max_steps( min_step_percent=self.config.min_step_percent, max_step_percent=self.config.max_step_percent, ) print("[INFO] Loaded Zero123") @torch.cuda.amp.autocast(enabled=False) def set_min_max_steps( self, min_step_percent: float = 0.02, max_step_percent: float = 0.98 ): self.min_step = int(self.num_train_timesteps * min_step_percent) self.max_step = int(self.num_train_timesteps * max_step_percent) @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def get_image_embeds( self, image: Float[Tensor, "B 3 256 256"] ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]: c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype)) c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode() return c_crossattn, c_concat @torch.cuda.amp.autocast(enabled=False) def encode_image( self, image: Float[Tensor, "B 3 256 256"] ) -> Float[Tensor, "B 4 32 32"]: input_dtype = image.dtype latent = self.model.get_first_stage_encoding( self.model.encode_first_stage(image.to(self.weights_dtype)) ) return latent.to(input_dtype) # [B, 4, 32, 32] Latent space image @torch.cuda.amp.autocast(enabled=False) def decode_latent( self, latent: Float[Tensor, "B 4 H W"], ) -> Float[Tensor, "B 3 512 512"]: input_dtype = latent.dtype image = self.model.decode_first_stage(latent) image = (image * 0.5 + 0.5).clamp(0, 1) return image.to(input_dtype) @staticmethod @torch.no_grad() def make_cond(cond): """Add zeros to the beginning of cond""" return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()} @torch.cuda.amp.autocast(enabled=False) @torch.no_grad() def clip_camera_projection( self, theta: Float[Tensor, "B"], azimuth: Float[Tensor, "B"], distance: Float[Tensor, "B"], c_crossattn: Float[Tensor, "B 1 768"], in_deg: bool = False, ): T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :] clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1)) return clip_emb def inject_lora( self, ckpt_fp: str = None, rank: int = 12, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], eval: bool = False, ): print( f"[INFO] Injecting LoRA from " + (str(ckpt_fp) if ckpt_fp is not None else "scratch"), ) lora_params, _ = inject_trainable_lora_extended( self.model.model, 
target_replace_module=set(target_replace_module), r=rank, loras=ckpt_fp, eval=eval, ) if not eval: self.require_grad_params += itertools.chain(*lora_params) return self def save_lora( self, ckpt_fp: str, target_replace_module: List[str] = ["CrossAttention", "GEGLU"], ):
save_lora_weight(
2
2023-12-17 12:45:38+00:00
24k
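The record above conditions Zero123 on relative camera pose via `make_T` (given in full in the record's context), which packs (theta, sin(azimuth), cos(azimuth), distance) into a 4-D vector that `clip_camera_projection` concatenates with the CLIP image embedding. A self-contained sketch with made-up pose values:

import torch

def make_T(theta, azimuth, distance, in_deg=False):
    # Copied from the record's util/pose.py snippet.
    if in_deg:
        theta, azimuth = theta.deg2rad(), azimuth.deg2rad()
    return torch.stack((theta, torch.sin(azimuth), torch.cos(azimuth), distance))

theta = torch.tensor([10.0, -5.0])     # hypothetical elevation offsets, degrees
azimuth = torch.tensor([30.0, 120.0])  # hypothetical azimuth offsets, degrees
distance = torch.tensor([0.0, 0.1])    # hypothetical radius offsets

T = make_T(theta, azimuth, distance, in_deg=True).T[:, None, :]
print(T.shape)  # torch.Size([2, 1, 4]), ready to concat with a (2, 1, 768) CLIP embedding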
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/model/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "IGNORE_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "tokenizer_image_token", "path": "VisualSearch/model/llava/mm_utils.py", "snippet": "def tokenizer_image_token(\n prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None\n):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split(\"<image>\")]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if (\n len(prompt_chunks) > 0\n and len(prompt_chunks[0]) > 0\n and prompt_chunks[0][0] == tokenizer.bos_token_id\n ):\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == \"pt\":\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f\"Unsupported tensor type: {return_tensors}\")\n return input_ids" }, { "identifier": "get_mask_from_json", "path": "VisualSearch/utils/data_processing.py", "snippet": "def get_mask_from_json(json_path, img):\n try:\n with open(json_path, \"r\") as r:\n anno = json.loads(r.read())\n except:\n with open(json_path, \"r\", encoding=\"cp1252\") as r:\n anno = json.loads(r.read())\n\n inform = anno[\"shapes\"]\n comments = anno[\"text\"]\n is_sentence = anno[\"is_sentence\"]\n\n height, width = img.shape[:2]\n\n ### sort polies by area\n area_list = []\n valid_poly_list = []\n for i in inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n if \"flag\" == label_id.lower(): ## meaningless deprecated annotations\n continue\n\n tmp_mask = np.zeros((height, width), dtype=np.uint8)\n cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)\n cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)\n tmp_area = tmp_mask.sum()\n\n area_list.append(tmp_area)\n valid_poly_list.append(i)\n\n ### ground-truth mask\n sort_index = np.argsort(area_list)[::-1].astype(np.int32)\n sort_index = list(sort_index)\n sort_inform = []\n for s_idx in sort_index:\n sort_inform.append(valid_poly_list[s_idx])\n\n mask = np.zeros((height, width), dtype=np.uint8)\n for i in sort_inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n\n if \"ignore\" in label_id.lower():\n label_value = 255 # ignored during evaluation\n else:\n label_value = 1 # target\n\n cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)\n cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)\n\n return mask, 
comments, is_sentence" }, { "identifier": "REFER", "path": "VisualSearch/utils/refer.py", "snippet": "class REFER:\n def __init__(self, data_root, dataset=\"refcoco\", splitBy=\"unc\"):\n # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog\n # also provide dataset name and splitBy information\n # e.g., dataset = 'refcoco', splitBy = 'unc'\n print(\"loading dataset %s into memory...\" % dataset)\n self.ROOT_DIR = osp.abspath(osp.dirname(__file__))\n self.DATA_DIR = osp.join(data_root, dataset)\n if dataset in [\"refcoco\", \"refcoco+\", \"refcocog\"]:\n self.IMAGE_DIR = osp.join(data_root, \"images/mscoco/images/train2014\")\n elif dataset == \"refclef\":\n self.IMAGE_DIR = osp.join(data_root, \"images/saiapr_tc-12\")\n else:\n print(\"No refer dataset is called [%s]\" % dataset)\n sys.exit()\n\n self.dataset = dataset\n\n # load refs from data/dataset/refs(dataset).json\n tic = time.time()\n\n ref_file = osp.join(self.DATA_DIR, \"refs(\" + splitBy + \").p\")\n print(\"ref_file: \", ref_file)\n self.data = {}\n self.data[\"dataset\"] = dataset\n self.data[\"refs\"] = pickle.load(open(ref_file, \"rb\"))\n\n # load annotations from data/dataset/instances.json\n instances_file = osp.join(self.DATA_DIR, \"instances.json\")\n instances = json.load(open(instances_file, \"rb\"))\n self.data[\"images\"] = instances[\"images\"]\n self.data[\"annotations\"] = instances[\"annotations\"]\n self.data[\"categories\"] = instances[\"categories\"]\n\n # create index\n self.createIndex()\n print(\"DONE (t=%.2fs)\" % (time.time() - tic))\n\n def createIndex(self):\n # create sets of mapping\n # 1) Refs: \t \t{ref_id: ref}\n # 2) Anns: \t \t{ann_id: ann}\n # 3) Imgs:\t\t \t{image_id: image}\n # 4) Cats: \t \t{category_id: category_name}\n # 5) Sents: \t{sent_id: sent}\n # 6) imgToRefs: \t{image_id: refs}\n # 7) imgToAnns: \t{image_id: anns}\n # 8) refToAnn: \t{ref_id: ann}\n # 9) annToRef: \t{ann_id: ref}\n # 10) catToRefs: \t{category_id: refs}\n # 11) sentToRef: \t{sent_id: ref}\n # 12) sentToTokens: {sent_id: tokens}\n print(\"creating index...\")\n # fetch info from instances\n Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}\n for ann in self.data[\"annotations\"]:\n Anns[ann[\"id\"]] = ann\n imgToAnns[ann[\"image_id\"]] = imgToAnns.get(ann[\"image_id\"], []) + [ann]\n for img in self.data[\"images\"]:\n Imgs[img[\"id\"]] = img\n for cat in self.data[\"categories\"]:\n Cats[cat[\"id\"]] = cat[\"name\"]\n\n # fetch info from refs\n Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}\n Sents, sentToRef, sentToTokens = {}, {}, {}\n for ref in self.data[\"refs\"]:\n # ids\n ref_id = ref[\"ref_id\"]\n ann_id = ref[\"ann_id\"]\n category_id = ref[\"category_id\"]\n image_id = ref[\"image_id\"]\n\n # add mapping related to ref\n Refs[ref_id] = ref\n imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]\n catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]\n refToAnn[ref_id] = Anns[ann_id]\n annToRef[ann_id] = ref\n\n # add mapping of sent\n for sent in ref[\"sentences\"]:\n Sents[sent[\"sent_id\"]] = sent\n sentToRef[sent[\"sent_id\"]] = ref\n sentToTokens[sent[\"sent_id\"]] = sent[\"tokens\"]\n\n # create class members\n self.Refs = Refs\n self.Anns = Anns\n self.Imgs = Imgs\n self.Cats = Cats\n self.Sents = Sents\n self.imgToRefs = imgToRefs\n self.imgToAnns = imgToAnns\n self.refToAnn = refToAnn\n self.annToRef = annToRef\n self.catToRefs = catToRefs\n self.sentToRef = sentToRef\n self.sentToTokens = sentToTokens\n print(\"index created.\")\n\n def 
getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=\"\"):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:\n refs = self.data[\"refs\"]\n else:\n if not len(image_ids) == 0:\n refs = [self.imgToRefs[image_id] for image_id in image_ids]\n else:\n refs = self.data[\"refs\"]\n if not len(cat_ids) == 0:\n refs = [ref for ref in refs if ref[\"category_id\"] in cat_ids]\n if not len(ref_ids) == 0:\n refs = [ref for ref in refs if ref[\"ref_id\"] in ref_ids]\n if not len(split) == 0:\n if split in [\"testA\", \"testB\", \"testC\"]:\n refs = [\n ref for ref in refs if split[-1] in ref[\"split\"]\n ] # we also consider testAB, testBC, ...\n elif split in [\"testAB\", \"testBC\", \"testAC\"]:\n refs = [\n ref for ref in refs if ref[\"split\"] == split\n ] # rarely used I guess...\n elif split == \"test\":\n refs = [ref for ref in refs if \"test\" in ref[\"split\"]]\n elif split == \"train\" or split == \"val\":\n refs = [ref for ref in refs if ref[\"split\"] == split]\n else:\n print(\"No such split [%s]\" % split)\n sys.exit()\n ref_ids = [ref[\"ref_id\"] for ref in refs]\n return ref_ids\n\n def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:\n ann_ids = [ann[\"id\"] for ann in self.data[\"annotations\"]]\n else:\n if not len(image_ids) == 0:\n lists = [\n self.imgToAnns[image_id]\n for image_id in image_ids\n if image_id in self.imgToAnns\n ] # list of [anns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.data[\"annotations\"]\n if not len(cat_ids) == 0:\n anns = [ann for ann in anns if ann[\"category_id\"] in cat_ids]\n ann_ids = [ann[\"id\"] for ann in anns]\n if not len(ref_ids) == 0:\n ids = set(ann_ids).intersection(\n set([self.Refs[ref_id][\"ann_id\"] for ref_id in ref_ids])\n )\n return ann_ids\n\n def getImgIds(self, ref_ids=[]):\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if not len(ref_ids) == 0:\n image_ids = list(set([self.Refs[ref_id][\"image_id\"] for ref_id in ref_ids]))\n else:\n image_ids = self.Imgs.keys()\n return image_ids\n\n def getCatIds(self):\n return self.Cats.keys()\n\n def loadRefs(self, ref_ids=[]):\n if type(ref_ids) == list:\n return [self.Refs[ref_id] for ref_id in ref_ids]\n elif type(ref_ids) == int:\n return [self.Refs[ref_ids]]\n\n def loadAnns(self, ann_ids=[]):\n if type(ann_ids) == list:\n return [self.Anns[ann_id] for ann_id in ann_ids]\n elif type(ann_ids) == int or type(ann_ids) == unicode:\n return [self.Anns[ann_ids]]\n\n def loadImgs(self, image_ids=[]):\n if type(image_ids) == list:\n return [self.Imgs[image_id] for image_id in image_ids]\n elif type(image_ids) == int:\n return [self.Imgs[image_ids]]\n\n def loadCats(self, cat_ids=[]):\n if type(cat_ids) == list:\n return [self.Cats[cat_id] for cat_id in cat_ids]\n elif type(cat_ids) == int:\n return [self.Cats[cat_ids]]\n\n def getRefBox(self, ref_id):\n ref = self.Refs[ref_id]\n ann = self.refToAnn[ref_id]\n return ann[\"bbox\"] # [x, y, w, h]\n\n def showRef(self, ref, seg_box=\"seg\"):\n ax = plt.gca()\n # show image\n image = self.Imgs[ref[\"image_id\"]]\n I = 
io.imread(osp.join(self.IMAGE_DIR, image[\"file_name\"]))\n ax.imshow(I)\n # show refer expression\n for sid, sent in enumerate(ref[\"sentences\"]):\n print(\"%s. %s\" % (sid + 1, sent[\"sent\"]))\n # show segmentations\n if seg_box == \"seg\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n polygons = []\n color = []\n c = \"none\"\n if type(ann[\"segmentation\"][0]) == list:\n # polygon used for refcoco*\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((len(seg) / 2, 2))\n polygons.append(Polygon(poly, True, alpha=0.4))\n color.append(c)\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 1, 0, 0),\n linewidths=3,\n alpha=1,\n )\n ax.add_collection(p) # thick yellow polygon\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 0, 0, 0),\n linewidths=1,\n alpha=1,\n )\n ax.add_collection(p) # thin red polygon\n else:\n # mask used for refclef\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n # show bounding-box\n elif seg_box == \"box\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n bbox = self.getRefBox(ref[\"ref_id\"])\n box_plot = Rectangle(\n (bbox[0], bbox[1]),\n bbox[2],\n bbox[3],\n fill=False,\n edgecolor=\"green\",\n linewidth=3,\n )\n ax.add_patch(box_plot)\n\n def getMask(self, ref):\n # return mask, area and mask-center\n ann = self.refToAnn[ref[\"ref_id\"]]\n image = self.Imgs[ref[\"image_id\"]]\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(ann[\"segmentation\"], image[\"height\"], image[\"width\"])\n else:\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n # compute area\n area = sum(mask.area(rle)) # should be close to ann['area']\n return {\"mask\": m, \"area\": area}\n # # position\n # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style)\n # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style)\n # # mass position (if there were multiple regions, we use the largest one.)\n # label_m = label(m, connectivity=m.ndim)\n # regions = regionprops(label_m)\n # if len(regions) > 0:\n # \tlargest_id = np.argmax(np.array([props.filled_area for props in regions]))\n # \tlargest_props = regions[largest_id]\n # \tmass_y, mass_x = largest_props.centroid\n # else:\n # \tmass_x, mass_y = position_x, position_y\n # # if centroid is not in mask, we find the closest point to it from mask\n # if m[mass_y, mass_x] != 1:\n # \tprint('Finding closes mask point ...')\n # \tkernel = np.ones((10, 10),np.uint8)\n # \tme = cv2.erode(m, kernel, iterations = 1)\n # \tpoints = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style\n # \tpoints = np.array(points)\n # \tdist = np.sum((points - (mass_y, mass_x))**2, axis=1)\n # \tid = np.argsort(dist)[0]\n # \tmass_y, mass_x = points[id]\n # \t# return\n # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y}\n # # show image and mask\n # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))\n # plt.figure()\n # plt.imshow(I)\n # ax = plt.gca()\n # img = np.ones( (m.shape[0], m.shape[1], 3) )\n # color_mask = np.array([2.0,166.0,101.0])/255\n 
# for i in range(3):\n # img[:,:,i] = color_mask[i]\n # ax.imshow(np.dstack( (img, m*0.5) ))\n # plt.show()\n\n def showMask(self, ref):\n M = self.getMask(ref)\n msk = M[\"mask\"]\n ax = plt.gca()\n ax.imshow(msk)" }, { "identifier": "ReferSegDataset", "path": "VisualSearch/utils/refer_seg_dataset.py", "snippet": "class ReferSegDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n refer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n DATA_DIR = os.path.join(base_dir, \"refer_seg\")\n self.refer_seg_ds_list = refer_seg_data.split(\n \"||\"\n ) # ['refclef', 'refcoco', 'refcoco+', 'refcocog']\n self.refer_seg_data = {}\n for ds in self.refer_seg_ds_list:\n if ds == \"refcocog\":\n splitBy = \"umd\"\n else:\n splitBy = \"unc\"\n\n if ds == \"grefcoco\":\n refer_api = G_REFER(DATA_DIR, ds, splitBy)\n else:\n refer_api = REFER(DATA_DIR, ds, splitBy)\n ref_ids_train = refer_api.getRefIds(split=\"train\")\n images_ids_train = refer_api.getImgIds(ref_ids=ref_ids_train)\n refs_train = refer_api.loadRefs(ref_ids=ref_ids_train)\n\n refer_seg_ds = {}\n refer_seg_ds[\"images\"] = []\n loaded_images = refer_api.loadImgs(image_ids=images_ids_train)\n\n for item in loaded_images:\n item = item.copy()\n if ds == \"refclef\":\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/saiapr_tc-12\", item[\"file_name\"]\n )\n else:\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/mscoco/images/train2014\", item[\"file_name\"]\n )\n refer_seg_ds[\"images\"].append(item)\n refer_seg_ds[\"annotations\"] = refer_api.Anns # anns_train\n\n print(\n \"dataset {} (refs {}) (train split) has {} images and {} annotations.\".format(\n ds,\n splitBy,\n len(refer_seg_ds[\"images\"]),\n len(refer_seg_ds[\"annotations\"]),\n )\n )\n\n img2refs = {}\n for ref in refs_train:\n image_id = ref[\"image_id\"]\n img2refs[image_id] = img2refs.get(image_id, []) + [\n ref,\n ]\n refer_seg_ds[\"img2refs\"] = img2refs\n self.refer_seg_data[ds] = refer_seg_ds\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = random.randint(0, len(self.refer_seg_ds_list) - 1)\n ds = self.refer_seg_ds_list[ds]\n refer_seg_ds = self.refer_seg_data[ds]\n images = refer_seg_ds[\"images\"]\n annotations = refer_seg_ds[\"annotations\"]\n img2refs = refer_seg_ds[\"img2refs\"]\n idx = random.randint(0, len(images) - 1)\n image_info = images[idx]\n image_path = 
image_info[\"file_name\"]\n image_id = image_info[\"id\"]\n refs = img2refs[image_id]\n if len(refs) == 0:\n return self.__getitem__(0)\n\n sents = []\n ann_ids = []\n for ref in refs:\n for sent in ref[\"sentences\"]:\n text = sent[\"sent\"]\n sents.append(text)\n ann_ids.append(ref[\"ann_id\"])\n if len(sents) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(sents))), size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(sents)))\n sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()\n # sampled_ann_ids = np.vectorize(ann_ids.__getitem__)(sampled_inds).tolist()\n sampled_ann_ids = [ann_ids[ind] for ind in sampled_inds]\n sampled_classes = sampled_sents\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n for text in sampled_classes:\n text = text.strip()\n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n flag = False\n masks = []\n bboxes_labels = []\n for ann_id in sampled_ann_ids:\n if isinstance(ann_id, list):\n assert False\n flag = True\n if -1 in ann_id:\n assert len(ann_id) == 1\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n else:\n m_final = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n for ann_id_i in ann_id:\n ann = annotations[ann_id_i]\n\n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n else:\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"],\n image_info[\"height\"],\n image_info[\"width\"],\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n m_final = m_final | m\n m = m_final\n masks.append(m)\n continue\n \n ann = annotations[ann_id]\n cur_bboxes = [ann['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if 
len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n \n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n masks.append(m)\n continue\n\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"], image_info[\"height\"], image_info[\"width\"]\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n masks.append(m)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n masks = np.stack(masks, axis=0)\n\n\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "SegDetDataset", "path": "VisualSearch/utils/general_segdet_dataset.py", "snippet": "class SegDetDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n general_segdet_data=\"objects365||cocostuff||paco_lvis\",\n general_segdet_sample_rate=[2,1,1]\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n self.data2list = {}\n self.data2classes = {}\n\n self.general_segdet_datas = general_segdet_data.split(\"||\")\n num_images = []\n for ds in self.general_segdet_datas:\n if ds == \"cocostuff\":\n classes, images, labels, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels, bboxes)\n elif ds == \"objects365\":\n classes, images, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, bboxes)\n else:\n classes, images, labels = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels)\n self.data2classes[ds] = classes\n num_images.append(len(images))\n sample_rate = np.array(general_segdet_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n if \"cocostuff\" in self.general_segdet_datas:\n self.cocostuff_class2index = {\n c: i for i, c in enumerate(self.data2classes[\"cocostuff\"])\n }\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n 
ds = np.random.choice(list(range(len(self.general_segdet_datas))), p=self.sample_rate)\n ds = self.general_segdet_datas[ds]\n\n if ds in [\"paco_lvis\"]:\n class_map = self.data2classes[ds]\n img_ids, coco_api = self.data2list[ds]\n idx = random.randint(0, len(img_ids) - 1)\n img_id = img_ids[idx]\n image_info = coco_api.loadImgs([img_id])[0]\n file_name = image_info[\"file_name\"]\n if ds == \"pascal_part\":\n file_name = os.path.join(\n \"VOCdevkit\", \"VOC2010\", \"JPEGImages\", file_name\n )\n image_path = os.path.join(self.base_dir, \"vlpart\", ds, file_name)\n elif ds == \"paco_lvis\":\n image_path = os.path.join(self.base_dir, \"coco2017\", file_name)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n annIds = coco_api.getAnnIds(imgIds=image_info[\"id\"])\n anns = coco_api.loadAnns(annIds)\n anns_category2instances = dict()\n for ann in anns:\n category_id = ann['category_id']\n if category_id not in anns_category2instances:\n anns_category2instances[category_id] = []\n anns_category2instances[category_id].append(ann)\n if len(anns_category2instances) == 0:\n return self.__getitem__(0)\n if len(anns_category2instances) >= self.num_classes_per_sample:\n sampled_anns = np.random.choice(\n list(anns_category2instances.keys()), size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_anns = list(anns_category2instances.keys())\n sampled_classes = []\n for category_id in sampled_anns:\n sampled_cls = class_map[category_id]\n if isinstance(sampled_cls, tuple):\n obj, part = sampled_cls\n if random.random() < 0.5:\n name = obj + \" \" + part\n else:\n name = \"the {} of the {}\".format(part, obj)\n else:\n name = sampled_cls\n name = name.replace('_', ' ')\n sampled_classes.append(name)\n\n elif ds in [\"cocostuff\"]:\n image, labels, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n label_path = labels[idx]\n bboxes = bboxes_all[idx]\n label = Image.open(label_path)\n label = np.array(label)\n if ds == \"ade20k\":\n label[label == 0] = 255\n label -= 1\n label[label == 254] = 255\n elif ds == \"cocostuff\":\n for c, i in self.cocostuff_class2index.items():\n if \"-\" in c:\n label[label == i] = 255\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = np.unique(label).tolist()\n if 255 in unique_label:\n unique_label.remove(255)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n elif ds in 
['objects365']:\n image, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n bboxes = bboxes_all[idx]\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = set()\n for bbox_info in bboxes:\n unique_label.add(bbox_info['category_id'])\n unique_label = list(unique_label)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n\n questions = []\n answers = []\n class_ids = []\n bboxes_labels = []\n for i, sampled_cls in enumerate(sampled_classes):\n text = sampled_cls\n if ds in ['objects365']:\n text = random.sample(text.split('/'), 1)[0]\n \n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n\n answers.append(random.choice(self.answer_list))\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n category_id = sampled_anns[i]\n cur_bboxes = [instance['bbox'] for instance in anns_category2instances[category_id]]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n continue\n\n class_id = self.data2classes[ds].tolist().index(sampled_cls)\n class_ids.append(class_id)\n if ds in ['objects365']:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id'] == class_id]\n else:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id']-1 == class_id]\n cur_bboxes = cur_bboxes[:100]\n assert len(cur_bboxes) > 0\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n 
conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n masks = []\n for category_id in sampled_anns:\n try:\n cur_anns = anns_category2instances[category_id]\n cur_mask = None\n for ann in cur_anns:\n if cur_mask is None:\n cur_mask = coco_api.annToMask(ann)\n else:\n cur_mask = cur_mask | coco_api.annToMask(ann)\n assert cur_mask is not None\n masks.append(cur_mask)\n except Exception as e:\n print(e)\n return self.__getitem__(0)\n\n masks = np.stack(masks, axis=0)\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n elif ds in ['objects365']:\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n masks_valid = [0]*len(bboxes_labels)\n else:\n label = torch.from_numpy(label).long()\n masks = []\n for class_id in class_ids:\n masks.append(label == class_id)\n masks = torch.stack(masks, dim=0)\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "MixedGroundingDataset", "path": "VisualSearch/utils/mixed_grounding_dataset.py", "snippet": "class MixedGroundingDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n ):\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n with open(os.path.join(base_dir, 'MixedGrounding', 'goldG_train.json')) as f:\n self.images = json.load(f)\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n\n idx = random.randint(0, len(self.images) - 1)\n image_info = self.images[idx]\n image_data_source = image_info['data_source']\n file_name = image_info[\"file_name\"]\n assert image_data_source in ['coco', 'vg', 'flickr']\n if image_data_source == 'coco':\n image_path = os.path.join(self.base_dir, 'coco2014/train2014', file_name)\n elif image_data_source == 'vg':\n image_path = os.path.join(self.base_dir, 'MixedGrounding/GQA/images', file_name)\n else:\n image_path = os.path.join(self.base_dir, 'MixedGrounding/flickr30k-images', file_name)\n caption = image_info['caption']\n instances = image_info['instances']\n if len(instances) == 0:\n return self.__getitem__(0)\n\n if len(instances) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(instances))), 
size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(instances)))\n\n sampled_classes = sampled_inds\n \n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n bboxes_labels = []\n for sample_ind in sampled_inds:\n text = []\n tokens_positive = instances[sample_ind]['tokens_positive']\n for token in tokens_positive:\n text.append(caption[token[0]:token[1]])\n text = \" \".join(text)\n text = text.strip()\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n cur_bboxes = [instances[sample_ind]['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n \n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [0]*len(bboxes_labels)\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "VQADataset", "path": "VisualSearch/utils/vqa_dataset.py", "snippet": "class VQADataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_image_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n vqa_data=\"possible_locations_conv_86k||llava_instruct_150k\",\n vqa_sample_rate=[2,1],\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_image_dir = base_image_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n DATA_DIR = os.path.join(base_image_dir, \"vsm_vqa_data\")\n self.vqa_image_root = os.path.join(base_image_dir, \"coco2017/train2017\")\n 
vqa_datas = vqa_data.split(\"||\")\n self.vqa_datas = []\n for data in vqa_datas:\n with open(os.path.join(DATA_DIR, \"{}.json\".format(data))) as f:\n data = json.load(f)\n self.vqa_datas.append(data)\n sample_rate = np.array(vqa_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = np.random.choice(list(range(len(self.vqa_datas))), p=self.sample_rate)\n ds = self.vqa_datas[ds]\n idx = random.randint(0, len(ds) - 1)\n item = ds[idx]\n image_path = os.path.join(self.vqa_image_root, item[\"image\"])\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ori_size = image.shape[:2]\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n conv = conversation_lib.default_conversation.copy()\n source = item[\"conversations\"]\n source = preprocess_multimodal(\n copy.deepcopy(source),\n mm_use_im_start_end=conv.sep_style == conversation_lib.SeparatorStyle.TWO,\n )\n roles = {\"human\": conv.roles[0], \"gpt\": conv.roles[1]}\n conversations = []\n if roles[source[0][\"from\"]] != conv.roles[0]:\n # Skip the first one if it is not from human\n source = source[1:]\n conv.messages = []\n for j, sentence in enumerate(source):\n role = roles[sentence[\"from\"]]\n assert role == conv.roles[j % 2], f\"{j}\"\n conv.append_message(role, sentence[\"value\"])\n conversations.append(conv.get_prompt())\n\n questions = conversations\n sampled_classes = conversations\n\n masks = torch.rand(1, *ori_size)\n label = torch.ones(ori_size) * self.ignore_label\n bboxes_labels = [torch.tensor([[0.5,0.5,1.0,1.0]])]\n bboxes_valid = [0]\n masks_valid = [0]\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "box_xyxy_to_cxcywh", "path": "VisualSearch/utils/utils.py", "snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)" }, { "identifier": "expand2square", "path": "VisualSearch/utils/utils.py", "snippet": "def expand2square(pil_img, background_color):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode, (width, width), background_color)\n result.paste(pil_img, (0, 0))\n return result\n else:\n result = Image.new(pil_img.mode, (height, height), background_color)\n result.paste(pil_img, (0, 
0))\n return result" } ]
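One detail in the context above worth unpacking: `tokenizer_image_token` splits the prompt on "<image>" and rejoins the tokenized chunks around the sentinel IMAGE_TOKEN_INDEX (-200). A tiny trace with made-up token ids shows why the separator is repeated offset + 1 times:

IMAGE_TOKEN_INDEX = -200  # sentinel from the llava constants above

def insert_separator(X, sep):
    # Interleave `sep` between the elements of X.
    return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]

# Hypothetical tokenizer output for "hi <image> there"; both chunks begin
# with a BOS id of 1, so offset = 1 and the BOS is kept exactly once.
prompt_chunks = [[1, 7592], [1, 1996]]
offset, input_ids = 1, [prompt_chunks[0][0]]
for x in insert_separator(prompt_chunks, [IMAGE_TOKEN_INDEX] * (offset + 1)):
    input_ids.extend(x[offset:])
print(input_ids)  # [1, 7592, -200, 1996]

Slicing with x[offset:] strips the duplicated BOS from every later chunk and trims the doubled sentinel back down to a single image token.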
import glob
import os
import random
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image
from pycocotools import mask
from transformers import CLIPImageProcessor
from transformers import OwlViTProcessor
from VisualSearch.model.llava import conversation as conversation_lib
from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN,
                                                IGNORE_INDEX,
                                                IMAGE_TOKEN_INDEX)
from VisualSearch.model.llava.mm_utils import tokenizer_image_token
from VisualSearch.utils.data_processing import get_mask_from_json
from VisualSearch.utils.refer import REFER
from VisualSearch.utils.refer_seg_dataset import ReferSegDataset
from VisualSearch.utils.general_segdet_dataset import SegDetDataset
from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset
from VisualSearch.utils.vqa_dataset import VQADataset
from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN,
                                      DEFAULT_IM_START_TOKEN,
                                      DEFAULT_IMAGE_TOKEN)
from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
16,059
self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1 target[:cur_len] = IGNORE_INDEX for i, rou in enumerate(rounds): if rou == "": break parts = rou.split(sep) # if len(parts) != 2: # break assert len(parts) == 2, (len(parts), rou) parts[0] += sep if DEFAULT_IMAGE_TOKEN in conversation: round_len = len(tokenizer_image_token(rou, tokenizer)) instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 else: round_len = len(tokenizer(rou).input_ids) instruction_len = len(tokenizer(parts[0]).input_ids) - 2 target[cur_len : cur_len + instruction_len] = IGNORE_INDEX cur_len += round_len target[cur_len:] = IGNORE_INDEX if False: z = target.clone() z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z) if local_rank == 0: print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, 
"bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits
refer_api = REFER(self.base_dir, ds, splitBy)
6
2023-12-15 14:58:24+00:00
24k
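Reading the trailing fields in schema order, the four values above close out this row: the gold next line, the index of the supporting context snippet, the creation timestamp, and the token-count bucket. A minimal sketch of how such a row might be scored against a model's prediction follows; the dict field names follow this dump's schema, while the exact-match rule is an assumption, not something the dump prescribes:

def score_row(row: dict, prediction: str) -> bool:
    # The gold target is the single source line stored in next_line.
    gold = row["next_line"].strip()
    # gold_snippet_index points at the context entry that supports the gold line.
    _supporting = row["context"][row["gold_snippet_index"]]  # unused here, shown for orientation
    return prediction.strip() == gold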
sinoyou/nelf-pro
nerfstudio/viewer/server/viewer_utils.py
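The context list that follows is a sequence of identifier/path/snippet triples retrieved from elsewhere in the repository. One common way to consume such a list is to concatenate the snippets ahead of the truncated file body when building the completion prompt; the layout below is an assumed sketch, not a format this dump prescribes:

def build_prompt(context: list, cropped_code: str) -> str:
    # Tag each retrieved snippet with its source path, then append the code to complete.
    parts = [f"# {entry['path']} ({entry['identifier']})\n{entry['snippet']}" for entry in context]
    return "\n\n".join(parts) + "\n\n" + cropped_code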
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras\n down the line in cases where your batches of camera data don't come from the same cameras.\n\n If a single value is provided, it is broadcasted to all cameras.\n\n Args:\n camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format\n fx: Focal length x\n fy: Focal length y\n cx: Principal point x\n cy: Principal point y\n width: Image width\n height: Image height\n distortion_params: OpenCV 6 radial distortion coefficients\n camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.\n times: Timestamps for each camera\n probe_config: dict config containing the generated probe information (core and basis)\n \"\"\"\n\n camera_to_worlds: TensorType[\"num_cameras\":..., 3, 4]\n fx: TensorType[\"num_cameras\":..., 1]\n fy: TensorType[\"num_cameras\":..., 1]\n cx: TensorType[\"num_cameras\":..., 1]\n cy: TensorType[\"num_cameras\":..., 1]\n width: TensorType[\"num_cameras\":..., 1]\n height: TensorType[\"num_cameras\":..., 1]\n distortion_params: Optional[TensorType[\"num_cameras\":..., 6]]\n camera_type: TensorType[\"num_cameras\":..., 1]\n times: Optional[TensorType[\"num_cameras\":..., 1]]\n image_filenames: Optional[List[str]]\n probe_config: Optional[list]\n\n def __init__(\n self,\n camera_to_worlds: TensorType[\"batch_c2ws\":..., 3, 4],\n fx: Union[TensorType[\"batch_fxs\":..., 1], float],\n fy: Union[TensorType[\"batch_fys\":..., 1], float],\n cx: Union[TensorType[\"batch_cxs\":..., 1], float],\n cy: Union[TensorType[\"batch_cys\":..., 1], float],\n width: Optional[Union[TensorType[\"batch_ws\":..., 1], int]] = None,\n height: Optional[Union[TensorType[\"batch_hs\":..., 1], int]] = None,\n distortion_params: Optional[TensorType[\"batch_dist_params\":..., 6]] = None,\n camera_type: Optional[\n Union[\n TensorType[\"batch_cam_types\":..., 1],\n int,\n List[CameraType],\n CameraType,\n ]\n ] = CameraType.PERSPECTIVE,\n times: Optional[TensorType[\"num_cameras\"]] = None,\n image_filenames: Optional[List[str]] = None,\n probe_config: Optional[list] = None\n ):\n \"\"\"Initializes the Cameras object.\n\n Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]\n (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or\n TensorType[1] (in the case of the rest of the elements). The dimensions before that are\n considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast\n all the tensors to be the same batch dimension. This means you can use any combination of the\n input types in the function signature and it won't break. 
Your batch size for all tensors\n must be broadcastable to the same size, and the resulting number of batch dimensions will be\n the batch dimension with the largest number of dimensions.\n \"\"\"\n\n # This will notify the tensordataclass that we have a field with more than 1 dimension\n self._field_custom_dimensions = {\"camera_to_worlds\": 2}\n\n self.camera_to_worlds = camera_to_worlds\n\n # fx fy calculation\n self.fx = self._init_get_fc_xy(fx, \"fx\") # @dataclass's post_init will take care of broadcasting\n self.fy = self._init_get_fc_xy(fy, \"fy\") # @dataclass's post_init will take care of broadcasting\n\n # cx cy calculation\n self.cx = self._init_get_fc_xy(cx, \"cx\") # @dataclass's post_init will take care of broadcasting\n self.cy = self._init_get_fc_xy(cy, \"cy\") # @dataclass's post_init will take care of broadcasting\n\n # Distortion Params Calculation:\n self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting\n\n # @dataclass's post_init will take care of broadcasting\n self.height = self._init_get_height_width(height, self.cy)\n self.width = self._init_get_height_width(width, self.cx)\n self.camera_type = self._init_get_camera_type(camera_type)\n self.times = self._init_get_times(times)\n \n self.image_filenames = image_filenames\n self.probe_config = probe_config\n if self.probe_config is not None:\n self.probe = Probes(self.camera_to_worlds, self.probe_config)\n else:\n self.probe = None\n \n self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors\n\n def _init_get_fc_xy(self, fc_xy, name):\n \"\"\"\n Parses the input focal length / principal point x or y and returns a tensor of the correct shape\n\n Only needs to make sure that we have a 1 in the last dimension if it is a tensor. If it is a float, we\n just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.\n\n Args:\n fc_xy: The focal length / principal point x or y\n name: The name of the variable. 
Used for error messages\n \"\"\"\n if isinstance(fc_xy, float):\n fc_xy = torch.Tensor([fc_xy], device=self.device)\n elif isinstance(fc_xy, torch.Tensor):\n if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:\n fc_xy = fc_xy.unsqueeze(-1)\n fc_xy = fc_xy.to(self.device)\n else:\n raise ValueError(f\"{name} must be a float or tensor, got {type(fc_xy)}\")\n return fc_xy\n\n def _init_get_camera_type(\n self,\n camera_type: Union[\n TensorType[\"batch_cam_types\":..., 1], TensorType[\"batch_cam_types\":...], int, List[CameraType], CameraType\n ],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument camera_type\n\n Camera Type Calculation:\n If CameraType, convert to int and then to tensor, then broadcast to all cameras\n If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n\n Args:\n camera_type: camera_type argument from __init__()\n \"\"\"\n if isinstance(camera_type, CameraType):\n camera_type = torch.tensor([camera_type.value], device=self.device)\n elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):\n camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)\n elif isinstance(camera_type, int):\n camera_type = torch.tensor([camera_type], device=self.device)\n elif isinstance(camera_type, torch.Tensor):\n assert not torch.is_floating_point(\n camera_type\n ), f\"camera_type tensor must be of type int, not: {camera_type.dtype}\"\n camera_type = camera_type.to(self.device)\n if camera_type.ndim == 0 or camera_type.shape[-1] != 1:\n camera_type = camera_type.unsqueeze(-1)\n # assert torch.all(\n # camera_type.view(-1)[0] == camera_type\n # ), \"Batched cameras of different camera_types will be allowed in the future.\"\n else:\n raise ValueError(\n 'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor[\"num_cameras\"]. 
\\\n Received: '\n + str(type(camera_type))\n )\n return camera_type\n\n def _init_get_height_width(\n self,\n h_w: Union[TensorType[\"batch_hws\":..., 1], TensorType[\"batch_hws\":...], int, None],\n c_x_y: TensorType[\"batch_cxys\":...],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument for height or width\n\n Height/Width Calculation:\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n If none, use cx or cy * 2\n Else raise error\n\n Args:\n h_w: height or width argument from __init__()\n c_x_y: cx or cy for when h_w == None\n \"\"\"\n if isinstance(h_w, int):\n h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)\n elif isinstance(h_w, torch.Tensor):\n assert not torch.is_floating_point(h_w), f\"height and width tensor must be of type int, not: {h_w.dtype}\"\n h_w = h_w.to(torch.int64).to(self.device)\n if h_w.ndim == 0 or h_w.shape[-1] != 1:\n h_w = h_w.unsqueeze(-1)\n # assert torch.all(h_w == h_w.view(-1)[0]), \"Batched cameras of different h, w will be allowed in the future.\"\n elif h_w is None:\n h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))\n else:\n raise ValueError(\"Height must be an int, tensor, or None, received: \" + str(type(h_w)))\n return h_w\n\n def _init_get_times(self, times):\n if times is None:\n times = None\n elif isinstance(times, torch.Tensor):\n if times.ndim == 0 or times.shape[-1] != 1:\n times = times.unsqueeze(-1).to(self.device)\n else:\n raise ValueError(f\"times must be None or a tensor, got {type(times)}\")\n\n return times\n\n @property\n def device(self):\n \"\"\"Returns the device that the camera is on.\"\"\"\n return self.camera_to_worlds.device\n\n @property\n def image_height(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.height\n\n @property\n def image_width(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the width of the images.\"\"\"\n return self.width\n\n @property\n def is_jagged(self):\n \"\"\"\n Returns whether or not the cameras are \"jagged\" (i.e. the height and widths are different, meaning that\n you cannot concatenate the image coordinate maps together)\n \"\"\"\n h_jagged = not torch.all(self.height == self.height.view(-1)[0])\n w_jagged = not torch.all(self.width == self.width.view(-1)[0])\n return h_jagged or w_jagged\n\n def get_image_coords(\n self, pixel_offset: float = 0.5, index: Optional[Tuple] = None\n ) -> TensorType[\"height\", \"width\", 2]:\n \"\"\"This gets the image coordinates of one of the cameras in this object.\n\n If no index is specified, it will return the maximum possible sized height / width image coordinate map,\n by looking at the maximum height and width of all the cameras in this object.\n\n Args:\n pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)\n index: Tuple of indices into the batch dimensions of the camera. 
Defaults to None, which returns the 0th\n flattened camera\n\n Returns:\n Grid of image coordinates.\n \"\"\"\n if index is None:\n image_height = torch.max(self.image_height.view(-1))\n image_width = torch.max(self.image_width.view(-1))\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n else:\n image_height = self.image_height[index].item()\n image_width = self.image_width[index].item()\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n return image_coords\n\n def generate_rays( # pylint: disable=too-many-statements\n self,\n camera_indices: Union[TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"], int],\n coords: Optional[TensorType[\"num_rays\":..., 2]] = None,\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n keep_shape: Optional[bool] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices.\n\n This function will standardize the input arguments and then call the _generate_rays_from_coords function\n to generate the rays. Our goal is to parse the arguments and then get them into the right shape:\n - camera_indices: (num_rays:..., num_cameras_batch_dims)\n - coords: (num_rays:..., 2)\n - camera_opt_to_camera: (num_rays:..., 3, 4) or None\n - distortion_params_delta: (num_rays:..., 6) or None\n\n Read the docstring for _generate_rays_from_coords for more information on how we generate the rays\n after we have standardized the arguments.\n\n We are only concerned about different combinations of camera_indices and coords matrices, and the following\n are the 4 cases we have to deal with:\n 1. isinstance(camera_indices, int) and coords == None\n - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)\n 2. isinstance(camera_indices, int) and coords != None\n - In this case, we broadcast camera_indices to the same batch dim as coords\n 3. not isinstance(camera_indices, int) and coords == None\n - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast\n all our other args to match the new definition of num_rays := (h, w) + num_rays\n 4. not isinstance(camera_indices, int) and coords != None\n - In this case, we have nothing to do, only check that the arguments are of the correct shape\n\n There is one more edge case we need to be careful with: when we have \"jagged cameras\" (ie: different heights\n and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.\n When coords == None (ie: when we render out the whole image associated with this camera), we run into problems\n since there's no way to stack each coordinate map as all coordinate maps are different shapes. In this case,\n we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,\n regardless of the number of prepended extra batch dimensions in the camera_indices tensor.\n\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n coords: Coordinates of the pixels to generate rays for. 
If None, the full image will be rendered.\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n distortion_params_delta: Optional delta for the distortion parameters.\n keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise\n keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the\n camera_indices and coords tensors (if we can).\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords.\n \"\"\"\n # Check the argument types to make sure they're valid and all shaped correctly\n assert isinstance(camera_indices, (torch.Tensor, int)), \"camera_indices must be a tensor or int\"\n assert coords is None or isinstance(coords, torch.Tensor), \"coords must be a tensor or None\"\n assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)\n assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)\n if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):\n num_rays_shape = camera_indices.shape[:-1]\n errormsg = \"Batch dims of inputs must match when inputs are all tensors\"\n assert coords.shape[:-1] == num_rays_shape, errormsg\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg\n\n # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later\n if not self.shape:\n cameras = self.reshape((1,))\n assert torch.all(\n torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0\n ), \"Can only index into single camera with no batch dimensions if index is zero\"\n else:\n cameras = self\n\n # If the camera indices are an int, then we need to make sure that the camera batch is 1D\n if isinstance(camera_indices, int):\n assert (\n len(cameras.shape) == 1\n ), \"camera_indices must be a tensor if cameras are batched with more than 1 batch dimension\"\n camera_indices = torch.tensor([camera_indices], device=cameras.device)\n\n assert camera_indices.shape[-1] == len(\n cameras.shape\n ), \"camera_indices must have shape (num_rays:..., num_cameras_batch_dims)\"\n\n # If keep_shape is True, then we need to make sure that the camera indices in question\n # are all the same height and width and can actually be batched while maintaining the image\n # shape\n if keep_shape is True:\n assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(\n cameras.width[camera_indices] == cameras.width[camera_indices[0]]\n ), \"Can only keep shape if all cameras have the same height and width\"\n\n # If the cameras don't all have same height / width, if coords is not none, we will need to generate\n # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.\n # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial\n if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):\n index_dim = camera_indices.shape[-1]\n camera_indices = camera_indices.reshape(-1, index_dim)\n _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]\n camera_indices = torch.cat(\n [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],\n 
)\n coords = torch.cat(_coords, dim=0)\n assert coords.shape[0] == camera_indices.shape[0]\n # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them\n\n # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords\n # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,\n # each image in camera_indices has to have the same shape since otherwise we would have error'd when\n # we checked keep_shape is valid or we aren't jagged.\n if coords is None:\n index_dim = camera_indices.shape[-1]\n index = camera_indices.reshape(-1, index_dim)[0]\n coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)\n coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)\n coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)\n camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None\n camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))\n if camera_opt_to_camera is not None\n else None\n )\n distortion_params_delta = ( # (h, w, num_rays, 6) or None\n distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))\n if distortion_params_delta is not None\n else None\n )\n\n # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims\n camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)\n\n # Checking our tensors have been standardized\n assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)\n assert camera_indices.shape[-1] == len(cameras.shape)\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]\n\n # This will do the actual work of generating the rays now that we have standardized the inputs\n # raybundle.shape == (num_rays) when done\n # pylint: disable=protected-access\n raybundle = cameras._generate_rays_from_coords(\n camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion\n )\n\n # If we have mandated that we don't keep the shape, then we flatten\n if keep_shape is False:\n raybundle = raybundle.flatten()\n\n # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,\n # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour\n # that we haven't caught yet with tests\n return raybundle\n\n # pylint: disable=too-many-statements\n def _generate_rays_from_coords(\n self,\n camera_indices: TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"],\n coords: TensorType[\"num_rays\":..., 2],\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices and coords where self isn't jagged\n\n This is a fairly complex function, so let's break this down slowly.\n\n Shapes involved:\n - num_rays: This is your output raybundle shape. 
It dictates the number and shape of the rays generated\n - num_cameras_batch_dims: This is the number of dimensions of our camera\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n The shape of this is such that indexing into camera_indices[\"num_rays\":...] will return the\n index into each batch dimension of the camera in order to get the correct camera specified by\n \"num_rays\".\n Example:\n >>> cameras = Cameras(...)\n >>> cameras.shape\n (2, 3, 4)\n >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3\n >>> camera_indices.shape\n (3,)\n >>> coords = torch.tensor([1,1])\n >>> coords.shape\n (2,)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()\n >>> out_rays.shape\n ()\n >>> camera_indices = torch.tensor([[0,0,0]])\n >>> camera_indices.shape\n (1, 3)\n >>> coords = torch.tensor([[1,1]])\n >>> coords.shape\n (1, 2)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)\n # since we added an extra dimension in front of camera_indices\n >>> out_rays.shape\n (1,)\n\n If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape\n\n The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the\n output shape and if you index into the output RayBundle with some indices [i:...], if you index into\n camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch\n indices into the original cameras object corresponding to that ray (ie: you will get the camera\n from our batched cameras corresponding to the ray at RayBundle[i:...]).\n\n coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning\n height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will\n get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].\n\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].\n\n distortion_params_delta: Optional delta for the distortion parameters.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].\n\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords. 
RayBundle.shape == num_rays\n \"\"\"\n # Make sure we're on the right devices\n camera_indices = camera_indices.to(self.device)\n coords = coords.to(self.device)\n\n # Checking to make sure everything is of the right shape and type\n num_rays_shape = camera_indices.shape[:-1]\n assert camera_indices.shape == num_rays_shape + (self.ndim,)\n assert coords.shape == num_rays_shape + (2,)\n assert coords.shape[-1] == 2\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)\n assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)\n\n # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all\n # of our output rays at each dimension of our cameras object\n true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]\n\n # Get all our focal lengths, principal points and make sure they are the right shapes\n y = coords[..., 0] # (num_rays,) get rid of the last dimension\n x = coords[..., 1] # (num_rays,) get rid of the last dimension\n fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)\n cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)\n assert (\n y.shape == num_rays_shape\n and x.shape == num_rays_shape\n and fx.shape == num_rays_shape\n and fy.shape == num_rays_shape\n and cx.shape == num_rays_shape\n and cy.shape == num_rays_shape\n ), (\n str(num_rays_shape)\n + str(y.shape)\n + str(x.shape)\n + str(fx.shape)\n + str(fy.shape)\n + str(cx.shape)\n + str(cy.shape)\n )\n\n # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)\n # Also make sure the shapes are correct\n coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)\n assert (\n coord.shape == num_rays_shape + (2,)\n and coord_x_offset.shape == num_rays_shape + (2,)\n and coord_y_offset.shape == num_rays_shape + (2,)\n )\n\n # Stack image coordinates and image coordinates offset by 1, check shapes too\n coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Undistorts our images according to our distortion parameters\n if not disable_distortion:\n distortion_params = None\n if self.distortion_params is not None:\n distortion_params = self.distortion_params[true_indices]\n if distortion_params_delta is not None:\n distortion_params = distortion_params + distortion_params_delta\n elif distortion_params_delta is not None:\n distortion_params = distortion_params_delta\n\n # Do not apply distortion for equirectangular images\n if distortion_params is not None:\n mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n coord_mask = torch.stack([mask, mask, mask], dim=0)\n if mask.any():\n coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(\n coord_stack[coord_mask, :].reshape(3, -1, 2),\n distortion_params[mask, :],\n ).reshape(-1, 2)\n\n # Make sure after we have undistorted our images, the shapes are still correct\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Gets our directions for all our rays in camera coordinates and checks shapes at the end\n # Here, directions_stack is of shape (3, 
num_rays, 3)\n # directions_stack[0] is the direction for ray in camera coordinates\n # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x\n # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y\n cam_types = torch.unique(self.camera_type, sorted=False)\n directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)\n if CameraType.PERSPECTIVE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()\n directions_stack[..., 2][mask] = -1.0\n\n if CameraType.FISHEYE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))\n theta = torch.clip(theta, 0.0, math.pi)\n\n sin_theta = torch.sin(theta)\n\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()\n directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)\n\n if CameraType.EQUIRECTANGULAR.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n # For equirect, fx = fy = height = width/2\n # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2\n theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed\n phi = torch.pi * (0.5 - coord_stack[..., 1])\n # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)\n directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()\n directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()\n\n for value in cam_types:\n if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:\n raise ValueError(f\"Camera type {value} not supported.\")\n\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n c2w = self.camera_to_worlds[true_indices]\n assert c2w.shape == num_rays_shape + (3, 4)\n\n if camera_opt_to_camera is not None:\n c2w = pose_utils.multiply(c2w, camera_opt_to_camera)\n rotation = c2w[..., :3, :3] # (..., 3, 3)\n assert rotation.shape == num_rays_shape + (3, 3)\n\n directions_stack = torch.sum(\n directions_stack[..., None, :] * rotation, dim=-1\n ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)\n\n directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)\n directions_norm = directions_norm[0]\n\n directions_stack = normalize(directions_stack, dim=-1)\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n origins = c2w[..., :3, 3] # (..., 3)\n assert origins.shape == num_rays_shape + (3,)\n\n directions = directions_stack[0]\n assert directions.shape == num_rays_shape + (3,)\n\n # norms of the vector going between adjacent coords, giving us dx and dy per output ray\n dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # (\"num_rays\":...,)\n dy = 
torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # (\"num_rays\":...,)\n assert dx.shape == num_rays_shape and dy.shape == num_rays_shape\n\n pixel_area = (dx * dy)[..., None] # (\"num_rays\":..., 1)\n assert pixel_area.shape == num_rays_shape + (1,)\n\n times = self.times[camera_indices, 0] if self.times is not None else None\n\n\n return RayBundle(\n origins=origins,\n directions=directions,\n pixel_area=pixel_area,\n camera_indices=camera_indices,\n directions_norm=directions_norm,\n times=times,\n probes=self.probe,\n )\n\n def to_json(\n self, camera_idx: int, image: Optional[TensorType[\"height\", \"width\", 2]] = None, max_size: Optional[int] = None\n ) -> Dict:\n \"\"\"Convert a camera to a json dictionary.\n\n Args:\n camera_idx: Index of the camera to convert.\n image: An image in range [0, 1] that is encoded to a base64 string.\n max_size: Max size to resize the image to if present.\n\n Returns:\n A JSON representation of the camera\n \"\"\"\n flattened = self.flatten()\n json_ = {\n \"type\": \"PinholeCamera\",\n \"cx\": flattened[camera_idx].cx.item(),\n \"cy\": flattened[camera_idx].cy.item(),\n \"fx\": flattened[camera_idx].fx.item(),\n \"fy\": flattened[camera_idx].fy.item(),\n \"camera_to_world\": self.camera_to_worlds[camera_idx].tolist(),\n \"camera_index\": camera_idx,\n \"times\": flattened[camera_idx].times.item() if self.times is not None else None,\n }\n if image is not None:\n image_uint8 = (image * 255).detach().type(torch.uint8)\n if max_size is not None:\n image_uint8 = image_uint8.permute(2, 0, 1)\n image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore\n image_uint8 = image_uint8.permute(1, 2, 0)\n image_uint8 = image_uint8.cpu().numpy()\n data = cv2.imencode(\".jpg\", image_uint8)[1].tobytes()\n json_[\"image\"] = str(\"data:image/jpeg;base64,\" + base64.b64encode(data).decode(\"ascii\"))\n return json_\n\n def get_intrinsics_matrices(self) -> TensorType[\"num_cameras\":..., 3, 3]:\n \"\"\"Returns the intrinsic matrices for each camera.\n\n Returns:\n Pinhole camera intrinsics matrices\n \"\"\"\n K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)\n K[..., 0, 0] = self.fx.squeeze(-1)\n K[..., 1, 1] = self.fy.squeeze(-1)\n K[..., 0, 2] = self.cx.squeeze(-1)\n K[..., 1, 2] = self.cy.squeeze(-1)\n K[..., 2, 2] = 1.0\n return K\n\n def rescale_output_resolution(\n self,\n scaling_factor: Union[TensorType[\"num_cameras\":...], TensorType[\"num_cameras\":..., 1], float, int],\n round_hw=False,\n ) -> None:\n \"\"\"Rescale the output resolution of the cameras.\n\n Args:\n scaling_factor: Scaling factor to apply to the output resolution.\n round_hw: Whether to round the height and width to the nearest integer.\n \"\"\"\n if isinstance(scaling_factor, (float, int)):\n scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:\n scaling_factor = scaling_factor.unsqueeze(-1)\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):\n pass\n else:\n raise ValueError(\n f\"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}.\"\n )\n\n self.fx = self.fx * scaling_factor\n self.fy = self.fy * scaling_factor\n self.cx = self.cx * scaling_factor\n self.cy = self.cy * scaling_factor\n if not round_hw:\n self.height = (self.height * scaling_factor).to(torch.int64)\n self.width = (self.width * 
scaling_factor).to(torch.int64)\n else:\n self.height = torch.floor(self.height * scaling_factor + 0.5).to(torch.int64)\n self.width = torch.floor(self.width * scaling_factor + 0.5).to(torch.int64)\n\n def get_plotly(self, camera_group):\n\n # define local necessary coordinates for plotting\n num_cameras = self.camera_to_worlds.shape[0]\n _cam_center_c = np.array([[.0, .0, .0]]).repeat(num_cameras, axis=0)\n _cam_forward_c = np.array([[.0, .0, -1.0]]).repeat(num_cameras, axis=0)\n _cam_up_c = np.array([[.0, 1.0, .0]]).repeat(num_cameras, axis=0)\n _cam_right_c = np.array([[1.0, .0, .0]]).repeat(num_cameras, axis=0)\n\n _pyramid_width = self.width.cpu().numpy() / self.fx.cpu().numpy()\n _pyramid_height = self.height.cpu().numpy() / self.fy.cpu().numpy()\n\n _cam_pyramid_ur = np.concatenate([_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dr = np.concatenate([_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_ul = np.concatenate([-_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dl = np.concatenate([-_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n\n _local_coordinates = {\n 'center': _cam_center_c, \n 'forward': _cam_forward_c, \n 'up': _cam_up_c, \n 'right': _cam_right_c, \n 'pyramid_ur': _cam_pyramid_ur, \n 'pyramid_dr': _cam_pyramid_dr, \n 'pyramid_ul': _cam_pyramid_ul, \n 'pyramid_dl': _cam_pyramid_dl, \n }\n\n # transform it into world coordinates\n data = {}\n for k in _local_coordinates.keys():\n _local_coor_homo = np.concatenate([_local_coordinates[k].reshape(-1, 3) * plotly_camera_scale, np.ones((num_cameras, 1))], axis=-1) # num_cam, 4\n _cw = self.camera_to_worlds.cpu().numpy() # num_cam, 3, 4\n\n _homo = np.einsum('ijk,ik->ij', _cw, _local_coor_homo) # num_cam, 3\n data[k] = _homo[:, :3]\n\n plot_data = plot_camera_components(data, image_list=self.image_filenames, camera_group=camera_group)\n \n if isinstance(plot_data, list):\n return plot_data\n else:\n return [plot_data]" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. 
\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indices.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projecting points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples" }, { "identifier": "base_config", "path": "nerfstudio/configs/base_config.py", "snippet": "CONSOLE = Console(width=120)\nclass PrintableConfig: # pylint: disable=too-few-public-methods\nclass InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\nclass MachineConfig(PrintableConfig):\nclass LocalWriterConfig(InstantiateConfig):\nclass LoggingConfig(PrintableConfig):\nclass TrainerConfig(PrintableConfig):\nclass ViewerConfig(PrintableConfig):\nclass Config(PrintableConfig):\n def __str__(self):\n def setup(self, **kwargs) -> Any:\n def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:\n def is_viewer_enabled(self) -> bool:\n def is_wandb_enabled(self) -> bool:\n def is_tensorboard_enabled(self) -> bool:\n def set_timestamp(self) -> None:\n def set_experiment_name(self) -> None:\n def get_base_dir(self) -> Path:\n def get_checkpoint_dir(self) -> Path:\n def print_to_terminal(self) -> None:\n def save_config(self) -> None:" }, { "identifier": "InputDataset", "path": "nerfstudio/data/datasets/base_dataset.py", "snippet": "class InputDataset(Dataset):\n \"\"\"Dataset that returns images.\n\n Args:\n dataparser_outputs: description of where and how to read input images.\n scale_factor: The scaling factor for the dataparser outputs\n \"\"\"\n\n def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):\n super().__init__()\n self._dataparser_outputs = dataparser_outputs\n self.has_masks = dataparser_outputs.mask_filenames is not None\n self.scale_factor = scale_factor\n self.scene_box = deepcopy(dataparser_outputs.scene_box)\n self.metadata = deepcopy(dataparser_outputs.metadata)\n self.cameras = deepcopy(dataparser_outputs.cameras)\n self.cameras.rescale_output_resolution(scaling_factor=scale_factor)\n self.image_cache = {}\n\n def __len__(self):\n return len(self._dataparser_outputs.image_filenames)\n\n def get_numpy_image(self, image_idx: int) -> npt.NDArray[np.uint8]:\n \"\"\"Returns the image of shape (H, W, 3 or 4).\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image_filename = self._dataparser_outputs.image_filenames[image_idx]\n pil_image = Image.open(image_filename)\n if self.scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * self.scale_factor), int(height * self.scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.BILINEAR)\n image = np.array(pil_image, dtype=\"uint8\") # shape is (h, w, 
3 or 4)\n # mask_filename = str(image_filename).replace(\"dense/images\", \"masks\").replace(\".jpg\", \".npy\")\n # mask = np.load(mask_filename)\n # image = image * mask[..., None]\n\n assert len(image.shape) == 3\n assert image.dtype == np.uint8\n assert image.shape[2] in [3, 4], f\"Image shape of {image.shape} is incorrect.\"\n return image\n\n def get_image(self, image_idx: int) -> TensorType[\"image_height\", \"image_width\", \"num_channels\"]:\n \"\"\"Returns a 3 channel image.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image = torch.from_numpy(self.get_numpy_image(image_idx).astype(\"float32\") / 255.0)\n if self._dataparser_outputs.alpha_color is not None and image.shape[-1] == 4:\n assert image.shape[-1] == 4\n image = image[:, :, :3] * image[:, :, -1:] + self._dataparser_outputs.alpha_color * (1.0 - image[:, :, -1:])\n else:\n image = image[:, :, :3]\n return image\n\n def get_data(self, image_idx: int) -> Dict:\n \"\"\"Returns the ImageDataset data as a dictionary.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n if image_idx in self.image_cache:\n image = self.image_cache[image_idx]\n else:\n image = self.get_image(image_idx)\n self.image_cache[image_idx] = image\n\n data = {\"image_idx\": image_idx, 'image_filename': self._dataparser_outputs.image_filenames[image_idx].name}\n data[\"image\"] = image\n for _, data_func_dict in self._dataparser_outputs.additional_inputs.items():\n assert \"func\" in data_func_dict, \"Missing function to process data: specify `func` in `additional_inputs`\"\n func = data_func_dict[\"func\"]\n assert \"kwargs\" in data_func_dict, \"No data to process: specify `kwargs` in `additional_inputs`\"\n data.update(func(image_idx, **data_func_dict[\"kwargs\"]))\n if self.has_masks:\n mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]\n data[\"mask\"] = get_image_mask_tensor_from_path(filepath=mask_filepath, scale_factor=self.scale_factor)\n metadata = self.get_metadata(data)\n data.update(metadata)\n return data\n\n # pylint: disable=no-self-use\n def get_metadata(self, data: Dict) -> Dict:\n \"\"\"Method that can be used to process any additional metadata that may be part of the model inputs.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n del data\n return {}\n\n def __getitem__(self, image_idx: int) -> Dict:\n data = self.get_data(image_idx)\n return data" }, { "identifier": "Model", "path": "nerfstudio/models/base_model.py", "snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. 
This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n world_size: int = 1,\n local_rank: int = 0,\n load_step: int = None, \n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n self.world_size = world_size\n self.local_rank = local_rank\n self.load_step = load_step\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. 
This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n \n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n \n def n_parameters(self):\n return -1.0\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n \n def customized_save(self, step: int, checkpoint_dir) -> None:\n \"\"\"Call the model's customized save function.\n\n Args:\n step: Current step.\n checkpoint_dir: directory of checkpoint\n \"\"\"\n pass\n\n def customized_load(self, load_step: int, checkpoint_dir) -> None:\n \"\"\"Call the model's customized load function.\n\n Args:\n checkpoint_dir: directory of checkpoint\n \"\"\"\n pass" }, { "identifier": "colormaps", "path": "nerfstudio/utils/colormaps.py", "snippet": "def apply_colormap(image: TensorType[\"bs\":..., 1], cmap=\"viridis\") -> TensorType[\"bs\":..., \"rgb\":3]:\ndef 
apply_depth_colormap(\n depth: TensorType[\"bs\":..., 1],\n accumulation: Optional[TensorType[\"bs\":..., 1]] = None,\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n cmap=\"turbo\",\n) -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_boolean_colormap(\n image: TensorType[\"bs\":..., 1, bool],\n true_color: TensorType[\"bs\":..., \"rgb\":3] = colors.WHITE,\n false_color: TensorType[\"bs\":..., \"rgb\":3] = colors.BLACK,\n) -> TensorType[\"bs\":..., \"rgb\":3]:" }, { "identifier": "profiler", "path": "nerfstudio/utils/profiler.py", "snippet": "CONSOLE = Console(width=120)\nPROFILER = []\ndef time_function(func: Callable) -> Callable:\n def wrapper(*args, **kwargs):\ndef flush_profiler(config: cfg.LoggingConfig):\ndef setup_profiler(config: cfg.LoggingConfig):\n def __init__(self, config: cfg.LoggingConfig):\n def update_time(self, func_name: str, start_time: float, end_time: float):\n def print_profile(self):\nclass Profiler:" }, { "identifier": "writer", "path": "nerfstudio/utils/writer.py", "snippet": "CONSOLE = Console(width=120)\nEVENT_WRITERS = []\nEVENT_STORAGE = []\nGLOBAL_BUFFER = {}\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"\n IMAGE = \"write_image\"\n PLOTLY = \"write_plotly\"\n SCALAR = \"write_scalar\"\n DICT = \"write_scalar_dict\"\n CONFIG = \"write_config\"\nclass EventName(enum.Enum):\nclass EventType(enum.Enum):\nclass Writer:\nclass TimeWriter:\nclass WandbWriter(Writer):\nclass TensorboardWriter(Writer):\nclass LocalWriter:\ndef put_image(name, image: TensorType[\"H\", \"W\", \"C\"], step: int):\ndef put_plotly(name: str, figure: Any, step: int = 0):\ndef put_scalar(name: str, scalar: Any, step: int):\ndef put_dict(name: str, scalar_dict: Dict[str, Any], step: int):\ndef put_config(name: str, config_dict: Dict[str, Any], step: int):\ndef put_time(name: str, duration: float, step: int, avg_over_steps: bool = True, update_eta: bool = False):\ndef write_out_storage():\ndef setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_messages: Optional[List[str]] = None) -> None:\ndef setup_event_writer(config: cfg.Config, log_dir: Path) -> None:\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], step: int) -> None:\n def __init__(self, writer, name, step=None, write=True):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, log_dir: Path, experiment_name: str):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def __init__(self, log_dir: Path):\n def write_image(self, name: str, image: TensorType[\"H\", \"W\", \"C\"], step: int) -> None:\n def write_plotly(self, name: str, figure: Any, step: int) -> None:\n def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], step: int) -> None:\n def 
write_config(self, name: str, config_dict: Dict[str, Any], step: int): # pylint: disable=unused-argument\ndef _cursorup(x: int):\ndef _format_time(seconds):\n def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Optional[List[str]] = None):\n def write_stats_log(self, step: int) -> None:\n def write_config(self, name: str, config_dict: Dict[str, Any], step: int):\n def _consolidate_events(self):\n def _update_header(self, latest_map, new_key):\n def _print_stats(self, latest_map, padding=\" \"):" }, { "identifier": "check_main_thread", "path": "nerfstudio/utils/decorators.py", "snippet": "def check_main_thread(func: Callable) -> Callable:\n \"\"\"Decorator: check if you are on main thread\"\"\"\n\n def wrapper(*args, **kwargs):\n ret = None\n if comms.is_main_process():\n ret = func(*args, **kwargs)\n return ret\n\n return wrapper" }, { "identifier": "decorate_all", "path": "nerfstudio/utils/decorators.py", "snippet": "def decorate_all(decorators: List[Callable]) -> Callable:\n \"\"\"A decorator to decorate all member functions of a class\n\n Args:\n decorators: list of decorators to add to all functions in the class\n \"\"\"\n\n def decorate(cls):\n for attr in cls.__dict__:\n if callable(getattr(cls, attr)) and attr != \"__init__\":\n for decorator in decorators:\n setattr(cls, attr, decorator(getattr(cls, attr)))\n return cls\n\n return decorate" }, { "identifier": "BasicImages", "path": "nerfstudio/utils/images.py", "snippet": "class BasicImages:\n \"\"\"This is a very primitive struct for holding images, especially for when these images\n are of different heights / widths.\n\n The purpose of this is to have a special struct wrapping around a list so that the\n nerfstudio_collate fn and other parts of the code recognise this as a struct to leave alone\n instead of reshaping or concatenating into a single tensor (since this will likely be used\n for cases where we have images of different sizes and shapes).\n\n This only has one batch dimension and will likely be replaced down the line with some\n TensorDataclass alternative that supports arbitrary batches.\n \"\"\"\n\n def __init__(self, images: List):\n assert isinstance(images, List)\n assert not images or isinstance(\n images[0], torch.Tensor\n ), f\"Input should be a list of tensors, not {type(images[0]) if isinstance(images, List) else type(images)}\"\n self.images = images\n\n def to(self, device):\n \"\"\"Move the images to the given device.\"\"\"\n assert isinstance(device, torch.device)\n return BasicImages([image.to(device) for image in self.images])" }, { "identifier": "load_from_json", "path": "nerfstudio/utils/io.py", "snippet": "def load_from_json(filename: Path):\n \"\"\"Load a dictionary from a JSON filename.\n\n Args:\n filename: The filename to load from.\n \"\"\"\n assert filename.suffix == \".json\"\n with open(filename, encoding=\"UTF-8\") as file:\n return json.load(file)" }, { "identifier": "write_to_json", "path": "nerfstudio/utils/io.py", "snippet": "def write_to_json(filename: Path, content: dict):\n \"\"\"Write data to a JSON file.\n\n Args:\n filename: The filename to write to.\n content: The dictionary data to write.\n \"\"\"\n assert filename.suffix == \".json\", \"Filename must have .json extension but got {}\".format(filename)\n with open(filename, \"w\", encoding=\"UTF-8\") as file:\n json.dump(content, file)" }, { "identifier": "GLOBAL_BUFFER", "path": "nerfstudio/utils/writer.py", "snippet": "GLOBAL_BUFFER = {}" }, { "identifier": "EventName", "path": "nerfstudio/utils/writer.py", 
"snippet": "class EventName(enum.Enum):\n \"\"\"Names of possible events that can be logged via Local Writer for convenience.\n see config/logging/default_logging.yaml\"\"\"\n\n ITER_TRAIN_TIME = \"Train Iter (time)\"\n TOTAL_TRAIN_TIME = \"Train Total (time)\"\n ITER_VIS_TIME = \"Viewer Rendering (time)\"\n ETA = \"ETA (time)\"\n TRAIN_RAYS_PER_SEC = \"Train Rays / Sec\"\n TEST_RAYS_PER_SEC = \"Test Rays / Sec\"\n VIS_RAYS_PER_SEC = \"Vis Rays / Sec\"\n CURR_TEST_PSNR = \"Test PSNR\"" }, { "identifier": "TimeWriter", "path": "nerfstudio/utils/writer.py", "snippet": "class TimeWriter:\n \"\"\"Timer context manager that calculates duration around wrapped functions\"\"\"\n\n def __init__(self, writer, name, step=None, write=True):\n self.writer = writer\n self.name = name\n self.step = step\n self.write = write\n\n self.start: float = 0.0\n self.duration: float = 0.0\n\n def __enter__(self):\n torch.cuda.synchronize()\n self.start = time()\n return self\n\n def __exit__(self, *args):\n torch.cuda.synchronize()\n self.duration = time() - self.start\n update_step = self.step is not None\n if self.write:\n self.writer.put_time(\n name=self.name,\n duration=self.duration,\n step=self.step if update_step else GLOBAL_BUFFER[\"max_iter\"],\n avg_over_steps=update_step,\n update_eta=self.name == EventName.ITER_TRAIN_TIME,\n )" }, { "identifier": "run_viewer_bridge_server_as_subprocess", "path": "nerfstudio/viewer/server/subprocess.py", "snippet": "def run_viewer_bridge_server_as_subprocess(\n websocket_port: int,\n zmq_port: Optional[int] = None,\n ip_address: str = \"127.0.0.1\",\n log_filename: Union[str, None] = None,\n):\n \"\"\"Runs the viewer bridge server as a subprocess.\n\n Args:\n zmq_port: Port to use for the ZMQ server.\n websocket_port: Port to use for the websocket server.\n ip_address: host to connect to\n log_filename: Filename to use for the log file. If None, no log file is created.\n\n Returns:\n None\n \"\"\"\n args = [sys.executable, \"-u\", \"-m\", server.__name__]\n\n # find an available port for zmq\n if zmq_port is None:\n sock = socket.socket()\n sock.bind((\"\", 0))\n zmq_port = sock.getsockname()[1]\n string = f\"Using ZMQ port: {zmq_port}\"\n CONSOLE.print(f\"[bold yellow]{string}\")\n\n args.append(\"--zmq-port\")\n args.append(str(zmq_port))\n args.append(\"--websocket-port\")\n args.append(str(websocket_port))\n args.append(\"--ip-address\")\n args.append(str(ip_address))\n # supress output if no log filename is specified\n logfile = open( # pylint: disable=consider-using-with\n log_filename if log_filename else os.devnull, \"w\", encoding=\"utf8\"\n )\n process = subprocess.Popen( # pylint: disable=consider-using-with\n args, stdout=logfile, stderr=logfile, start_new_session=True\n )\n\n def cleanup(process):\n process.kill()\n process.wait()\n\n def poll_process():\n \"\"\"\n Continually check to see if the viewer bridge server process is still running and has not failed.\n If it fails, alert the user and exit the entire program.\n \"\"\"\n while process.poll() is None:\n time.sleep(0.5)\n string = f\"\\nThe viewer bridge server subprocess failed. Please check the log file {log_filename}.\\n\"\n string += (\n \"You likely have to modify --viewer.zmq-port and/or --viewer.websocket-port in the \"\n \"config to avoid conflicting ports.\\n\"\n )\n string += \"Try modifying --viewer.websocket-port 7007\\n\"\n CONSOLE.print(f\"[bold red]{string}\")\n cleanup(process)\n # This exists the entire program. 
sys.exit() will only kill the thread that this runs in.\n os.kill(os.getpid(), signal.SIGKILL)\n\n # continually check to see if the process stopped\n t1 = threading.Thread(target=poll_process)\n t1.daemon = True\n t1.start()\n atexit.register(cleanup, process)\n return zmq_port" }, { "identifier": "get_intrinsics_matrix_and_camera_to_world_h", "path": "nerfstudio/viewer/server/utils.py", "snippet": "def get_intrinsics_matrix_and_camera_to_world_h(\n camera_object: Dict[str, Any], image_height: int\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Returns the camera intrinsics matrix and the camera to world homogeneous matrix.\n\n Args:\n camera_object: a Camera object.\n image_size: the size of the image (height, width)\n \"\"\"\n # intrinsics\n fov = camera_object[\"fov\"]\n aspect = camera_object[\"aspect\"]\n image_width = aspect * image_height\n pp_w = image_width / 2.0\n pp_h = image_height / 2.0\n focal_length = three_js_perspective_camera_focal_length(fov, image_height)\n intrinsics_matrix = torch.tensor([[focal_length, 0, pp_w], [0, focal_length, pp_h], [0, 0, 1]]).float()\n\n # extrinsics\n camera_to_world_h = torch.tensor(get_chunks(camera_object[\"matrix\"], size_of_chunk=4)).T.float()\n camera_to_world_h = torch.stack(\n [\n camera_to_world_h[0, :],\n camera_to_world_h[2, :],\n camera_to_world_h[1, :],\n camera_to_world_h[3, :],\n ],\n dim=0,\n )\n\n return intrinsics_matrix, camera_to_world_h" }, { "identifier": "Viewer", "path": "nerfstudio/viewer/server/visualizer.py", "snippet": "class Viewer:\n \"\"\"Viewer class for connecting to the bridge server.\n\n Args:\n zmq_port: Where to connect with ZMQ.\n window: An already existing ViewerWindow.\n ip_address: The ip address of the bridge server.\n \"\"\"\n\n def __init__(\n self, zmq_port: Optional[int] = None, window: Optional[ViewerWindow] = None, ip_address: str = \"127.0.0.1\"\n ):\n if zmq_port is None and window is None:\n raise ValueError(\"Must specify either zmq_port or window.\")\n if window is None:\n self.window = ViewerWindow(zmq_port=zmq_port, ip_address=ip_address)\n else:\n self.window = window\n self.path = Path(())\n\n @staticmethod\n def view_into(window: ViewerWindow, path: Path):\n \"\"\"Returns a new Viewer but keeping the same ViewerWindow.\"\"\"\n vis = Viewer(window=window)\n vis.path = path\n return vis\n\n def __getitem__(self, path):\n return Viewer.view_into(self.window, self.path.append(path))\n\n def __repr__(self):\n return f\"<Viewer using: {self.window} at path: {self.path}>\"\n\n def write(self, data: Union[Dict, str, None] = None):\n \"\"\"Write data.\"\"\"\n path = self.path.lower()\n return self.window.send({\"type\": \"write\", \"path\": path, \"data\": data})\n\n def read(self):\n \"\"\"Read data.\"\"\"\n path = self.path.lower()\n return self.window.send({\"type\": \"read\", \"path\": path})\n\n def delete(self):\n \"\"\"Delete data.\"\"\"\n return self.write(data=None)" } ]
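The chunked rendering loop in Model.get_outputs_for_camera_ray_bundle above is a reusable pattern: slice the rays into fixed-size chunks, render each slice, then stitch the per-output chunks back together. A minimal stand-alone sketch of that pattern, where render_fn and the flat rays tensor are placeholders for the real RayBundle machinery:

from collections import defaultdict

import torch


def render_in_chunks(render_fn, rays: torch.Tensor, chunk: int = 4096) -> dict:
    """Render `rays` in fixed-size chunks and stitch per-chunk outputs back together."""
    outputs_lists = defaultdict(list)
    for i in range(0, rays.shape[0], chunk):
        chunk_outputs = render_fn(rays[i : i + chunk])  # dict of name -> tensor
        for name, value in chunk_outputs.items():
            outputs_lists[name].append(value)
    # concatenate each output's chunks along the ray dimension
    return {name: torch.cat(values) for name, values in outputs_lists.items()}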
import base64
import enum
import os
import sys
import threading
import time
import warnings

import cv2
import numpy as np
import torch
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from cryptography.utils import CryptographyDeprecationWarning
from rich.console import Console

from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datasets.base_dataset import InputDataset
from nerfstudio.models.base_model import Model
from nerfstudio.utils import colormaps, profiler, writer
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json, write_to_json
from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter
from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess
from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h
from nerfstudio.viewer.server.visualizer import Viewer
21585
        if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix):
            self.camera_moving = False
        else:
            self.prev_camera_matrix = camera_object["matrix"]
            self.camera_moving = True

        output_type = self.vis["renderingState/output_choice"].read()
        if output_type is None:
            output_type = OutputTypes.INIT
        if self.prev_output_type != output_type:
            self.camera_moving = True

        colormap_type = self.vis["renderingState/colormap_choice"].read()
        if colormap_type is None:
            colormap_type = ColormapTypes.INIT
        if self.prev_colormap_type != colormap_type:
            self.camera_moving = True

        return camera_object

    def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):
        """Determines which colormap to use based on the set colormap type

        Args:
            outputs: the output tensors for which to apply colormaps
            colors: only set if the colormap is for semantics. Defaults to None.
            eps: epsilon to handle floating point comparisons
        """
        if self.output_list:
            reformatted_output = self._process_invalid_output(self.prev_output_type)

        # default for rgb images
        if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3:
            return outputs[reformatted_output]

        # rendering depth outputs
        if self.prev_colormap_type == ColormapTypes.DEPTH or (
            self.prev_colormap_type == ColormapTypes.DEFAULT
            and outputs[reformatted_output].dtype == torch.float
            and (torch.max(outputs[reformatted_output]) - 1.0) > eps  # handle floating point arithmetic
        ):
            accumulation_str = (
                OutputTypes.ACCUMULATION
                if OutputTypes.ACCUMULATION in self.output_list
                else OutputTypes.ACCUMULATION_FINE
            )
            return colormaps.apply_depth_colormap(outputs[reformatted_output], accumulation=outputs[accumulation_str])

        # rendering accumulation outputs
        if self.prev_colormap_type == ColormapTypes.TURBO or (
            self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float
        ):
            return colormaps.apply_colormap(outputs[reformatted_output])

        # rendering semantic outputs
        if self.prev_colormap_type == ColormapTypes.SEMANTIC or (
            self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.int
        ):
            logits = outputs[reformatted_output]
            labels = torch.argmax(torch.nn.functional.softmax(logits, dim=-1), dim=-1)  # type: ignore
            assert colors is not None
            return colors[labels]

        # rendering boolean outputs
        if self.prev_colormap_type == ColormapTypes.BOOLEAN or (
            self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.bool
        ):
            return colormaps.apply_boolean_colormap(outputs[reformatted_output])

        raise NotImplementedError

    def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):
        """Chooses the correct output and sends it to the viewer

        Args:
            outputs: the dictionary of outputs to choose from, from the graph
            colors: only set if the colormap is for semantics. Defaults to None.
            eps: epsilon to handle floating point comparisons
        """
        if self.output_list is None:
            self.output_list = list(outputs.keys())
            viewer_output_list = list(np.copy(self.output_list))
            # remapping rgb_fine -> rgb for all cases just so that we don't have 2 of them in the options
            if OutputTypes.RGB_FINE in self.output_list:
                viewer_output_list.remove(OutputTypes.RGB_FINE)
                viewer_output_list.insert(0, OutputTypes.RGB)
            self.vis["renderingState/output_options"].write(viewer_output_list)

        reformatted_output = self._process_invalid_output(self.prev_output_type)
        # re-register colormaps and send to viewer
        if self.output_type_changed or self.prev_colormap_type == ColormapTypes.INIT:
            self.prev_colormap_type = ColormapTypes.DEFAULT
            colormap_options = [ColormapTypes.DEFAULT]
            if (
                outputs[reformatted_output].shape[-1] != 3
                and outputs[reformatted_output].dtype == torch.float
                and (torch.max(outputs[reformatted_output]) - 1.0) <= eps  # handle floating point arithmetic
            ):
                # accumulation can also include depth
                colormap_options.extend(["depth"])
            self.output_type_changed = False
            self.vis["renderingState/colormap_choice"].write(self.prev_colormap_type)
            self.vis["renderingState/colormap_options"].write(colormap_options)

        selected_output = (self._apply_colormap(outputs, colors) * 255).type(torch.uint8)
        image = selected_output[..., [2, 1, 0]].cpu().numpy()
        data = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 75])[1].tobytes()
        data = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
        self.vis["render_img"].write(data)

    def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None:
        """Calculates and populates all the rendering statistics

        Args:
            render_time: total time spent rendering current view
            num_rays: number of rays rendered
            image_height: height of the current view
            image_width: width of the current view
        """
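The tail of cropped_code encodes the rendered frame as a base64 JPEG data URI before writing it to the viewer. The same pipeline, distilled into a self-contained helper; the frame argument and quality default here are illustrative, not part of the dataset:

import base64

import cv2
import numpy as np


def encode_frame_as_data_uri(frame: np.ndarray, quality: int = 75) -> str:
    """Encode a uint8 HxWx3 image as a JPEG data URI, mirroring _send_output_to_viewer."""
    # cv2.imencode expects BGR channel order, hence the [..., [2, 1, 0]] flip upstream
    ok, buf = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, quality])
    assert ok, "JPEG encoding failed"
    return "data:image/jpeg;base64," + base64.b64encode(buf.tobytes()).decode("ascii")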
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Code to interface with the `vis/` (the JS viewer).
"""
from __future__ import annotations

warnings.filterwarnings("ignore", category=CryptographyDeprecationWarning)

CONSOLE = Console(width=120)


def get_viewer_version() -> str:
    """Get the version of the viewer."""
    json_filename = os.path.join(os.path.dirname(__file__), "../app/package.json")
    version = load_from_json(Path(json_filename))["version"]
    return version


@check_main_thread
def setup_viewer(config: cfg.ViewerConfig, log_filename: Path):
    """Sets up the viewer if enabled

    Args:
        config: the configuration to instantiate viewer
    """
    viewer_state = ViewerState(config, log_filename=log_filename)
    banner_messages = [f"Viewer at: {viewer_state.viewer_url}"]
    return viewer_state, banner_messages


class OutputTypes(str, enum.Enum):
    """Non-comprehensive list of output render types"""

    INIT = "init"
    RGB = "rgb"
    RGB_FINE = "rgb_fine"
    ACCUMULATION = "accumulation"
    ACCUMULATION_FINE = "accumulation_fine"


class ColormapTypes(str, enum.Enum):
    """Non-comprehensive list of colormap render types"""

    INIT = "init"
    DEFAULT = "default"
    TURBO = "turbo"
    DEPTH = "depth"
    SEMANTIC = "semantic"
    BOOLEAN = "boolean"


class IOChangeException(Exception):
    """Basic camera exception to interrupt viewer"""


class SetTrace:
    """Basic trace function"""

    def __init__(self, func):
        self.func = func

    def __enter__(self):
        sys.settrace(self.func)
        return self

    def __exit__(self, ext_type, exc_value, traceback):
        sys.settrace(None)


class RenderThread(threading.Thread):
    """Thread that does all the rendering calls while listening for interrupts

    Args:
        state: current viewer state object
        graph: current checkpoint of model
        camera_ray_bundle: input rays to pass through the graph to render out
    """

    def __init__(self, state: "ViewerState", graph: Model, camera_ray_bundle: RayBundle):
        threading.Thread.__init__(self)
        self.state = state
        self.graph = graph
        self.camera_ray_bundle = camera_ray_bundle
        self.exc = None
        self.vis_outputs = None

    def run(self):
        """Run function that renders out images given the current graph and ray bundles.
        Interlaced with a trace function that checks to see if any I/O changes were registered.
        Exits and continues program if IOChangeException thrown.
        """
        outputs = None
        try:
            with SetTrace(self.state.check_interrupt):
                with torch.no_grad():
                    outputs = self.graph.get_outputs_for_camera_ray_bundle(self.camera_ray_bundle)
        except Exception as e:  # pylint: disable=broad-except
            self.exc = e
        if outputs:
            self.vis_outputs = outputs
        self.state.check_done_render = True
        self.state.check_interrupt_vis = False

    def join(self, timeout=None):
        threading.Thread.join(self)
        if self.exc:
            raise self.exc


class CheckThread(threading.Thread):
    """Thread that constantly checks for I/O changes and sets a flag indicating interrupt

    Args:
        state: current viewer state object
    """

    def __init__(self, state):
        threading.Thread.__init__(self)
        self.state = state

    def run(self):
        """Run function that checks to see if any of the existing state has changed
        (e.g. camera pose/output type/resolutions).
        Sets the viewer state flag to true to signal to render thread that an interrupt was registered.
        """
        self.state.check_done_render = False
        while not self.state.check_done_render:
            # check camera
            data = self.state.vis["renderingState/camera"].read()
            if data is not None:
                camera_object = data["object"]
                if self.state.prev_camera_matrix is None or (
                    not np.allclose(camera_object["matrix"], self.state.prev_camera_matrix)
                    and not self.state.prev_moving
                ):
                    self.state.check_interrupt_vis = True
                    self.state.prev_moving = True
                    return
                self.state.prev_moving = False

            # check output type
            output_type = self.state.vis["renderingState/output_choice"].read()
            if output_type is None:
                output_type = OutputTypes.INIT
            if self.state.prev_output_type != output_type:
                self.state.check_interrupt_vis = True
                return

            # check colormap type
            colormap_type = self.state.vis["renderingState/colormap_choice"].read()
            if colormap_type is None:
                colormap_type = ColormapTypes.INIT
            if self.state.prev_colormap_type != colormap_type:
                self.state.check_interrupt_vis = True
                return

            # check max render
            max_resolution = self.state.vis["renderingState/maxResolution"].read()
            if max_resolution is not None:
                if self.state.max_resolution != max_resolution:
                    self.state.check_interrupt_vis = True
                    return


@decorate_all([check_main_thread])
class ViewerState:
    """Class to hold state for viewer variables

    Args:
        config: viewer setup configuration
    """

    def __init__(self, config: cfg.ViewerConfig, log_filename: Path):
        self.config = config
        self.vis = None
        self.viewer_url = None
        self.log_filename = log_filename
        if self.config.launch_bridge_server:
            # start the viewer bridge server
            assert self.config.websocket_port is not None
            self.log_filename.parent.mkdir(exist_ok=True)
            zmq_port = run_viewer_bridge_server_as_subprocess(
                self.config.websocket_port,
                zmq_port=self.config.zmq_port,
                ip_address=self.config.ip_address,
                log_filename=str(self.log_filename),
            )
            # TODO(ethan): log the output of the viewer bridge server in a file where the training logs go
            CONSOLE.line()
            version = get_viewer_version()
            websocket_url = f"ws://localhost:{self.config.websocket_port}"
            self.viewer_url = f"https://viewer.nerf.studio/versions/{version}/?websocket_url={websocket_url}"
            CONSOLE.rule(characters="=")
            CONSOLE.print(f"[Public] Open the viewer at {self.viewer_url}")
            CONSOLE.rule(characters="=")
            CONSOLE.line()
            self.vis = Viewer(zmq_port=zmq_port, ip_address=self.config.ip_address)
        else:
            assert self.config.zmq_port is not None
            self.vis = Viewer(zmq_port=self.config.zmq_port, ip_address=self.config.ip_address)

        # viewer specific variables
        self.prev_camera_matrix = None
        self.prev_output_type = OutputTypes.INIT
        self.prev_colormap_type = ColormapTypes.INIT
        self.prev_moving = False
        self.output_type_changed = True
        self.max_resolution = 1000
        self.check_interrupt_vis = False
        self.check_done_render = True
        self.step = 0
        self.static_fps = 1
        self.moving_fps = 24
        self.camera_moving = False
        self.prev_camera_timestamp = 0
        self.probe_config = None

        self.output_list = None

    def _pick_drawn_image_idxs(self, total_num: int) -> list[int]:
        """Determine indices of images to display in viewer.

        Args:
            total_num: total number of training images.

        Returns:
            List of indices from [0, total_num-1].
        """
        if self.config.max_num_display_images < 0:
            num_display_images = total_num
        else:
            num_display_images = min(self.config.max_num_display_images, total_num)
        # draw indices, roughly evenly spaced
        return np.linspace(0, total_num - 1, num_display_images, dtype=np.int32).tolist()

    def init_scene(self, dataset: InputDataset, start_train=True) -> None:
        """Draw some images and the scene aabb in the viewer.

        Args:
            dataset: dataset to render in the scene
            start_train: whether to start training when the viewer initializes; if False,
                only displays the dataset until training is resumed
        """
        # set the config base dir
        self.vis["renderingState/config_base_dir"].write(str(self.log_filename.parents[0]))

        # clear the current scene
        self.vis["sceneState/sceneBox"].delete()
        self.vis["sceneState/cameras"].delete()

        # draw the training cameras and images
        image_indices = self._pick_drawn_image_idxs(len(dataset))
        for idx in image_indices:
            image = dataset[idx]["image"]
            if isinstance(image, BasicImages):
                bgr = image.images[0][..., [2, 1, 0]]
            else:
                bgr = image[..., [2, 1, 0]]
            camera_json = dataset.cameras.to_json(camera_idx=idx, image=bgr, max_size=100)
            self.vis[f"sceneState/cameras/{idx:06d}"].write(camera_json)

        # draw the scene box (i.e., the bounding box)
        json_ = dataset.scene_box.to_json()
        self.vis["sceneState/sceneBox"].write(json_)

        # set the initial state whether to train or not
        self.vis["renderingState/isTraining"].write(start_train)
        # self.vis["renderingState/render_time"].write(str(0))

        self.probe_config = dataset.cameras.probe_config

        # set the properties of the camera
        # self.vis["renderingState/camera"].write(json_)

        # set the main camera intrinsics to one from the dataset
        # K = camera.get_intrinsics_matrix()
        # set_persp_intrinsics_matrix(self.vis, K.double().numpy())

    def _check_camera_path_payload(self, trainer, step: int):
        """Check to see if the camera path export button was pressed."""
        # check if we should interrupt from a button press
        camera_path_payload = self.vis["camera_path_payload"].read()
        if camera_path_payload:
            # save a model checkpoint
            trainer.save_checkpoint(step)
            # write to json file
            camera_path_filename = camera_path_payload["camera_path_filename"] + '.json'
            camera_path = camera_path_payload["camera_path"]
            write_to_json(Path(camera_path_filename), camera_path)
            self.vis["camera_path_payload"].delete()

    def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None:
        """Updates the scene based on the graph weights

        Args:
            trainer: the trainer object, used here to save a checkpoint on camera path export
            step: iteration step of training
            graph: the current checkpoint of the model
            num_rays_per_batch: number of rays per training batch, used to throttle rendering
        """
        has_temporal_distortion = getattr(graph, "temporal_distortion", None) is not None
        self.vis["model/has_temporal_distortion"].write(str(has_temporal_distortion).lower())

        is_training = self.vis["renderingState/isTraining"].read()
        self.step = step

        self._check_camera_path_payload(trainer, step)

        camera_object = self._get_camera_object()
        if camera_object is None:
            return

        if is_training is None or is_training:
            # in training mode
            if self.camera_moving:
                # if the camera is moving, then we pause training and update camera continuously
                while self.camera_moving:
                    self._render_image_in_viewer(camera_object, graph, is_training)
                    camera_object = self._get_camera_object()
            else:
                # if the camera is not moving, then we approximate how many training steps need to be taken
                # to render at a FPS defined by self.static_fps.
                if EventName.TRAIN_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]:
                    train_rays_per_sec = GLOBAL_BUFFER["events"][EventName.TRAIN_RAYS_PER_SEC.value]["avg"]
                    target_train_util = self.vis["renderingState/targetTrainUtil"].read()
                    if target_train_util is None:
                        target_train_util = 0.9
                    batches_per_sec = train_rays_per_sec / num_rays_per_batch
                    num_steps = max(int(1 / self.static_fps * batches_per_sec), 1)
                else:
                    num_steps = 1
                if step % num_steps == 0:
                    self._render_image_in_viewer(camera_object, graph, is_training)
        else:
            # in pause training mode, enter render loop with set graph
            local_step = step
            run_loop = not is_training
            while run_loop:
                # if self._is_render_step(local_step) and step > 0:
                if step > 0:
                    self._render_image_in_viewer(camera_object, graph, is_training)
                    camera_object = self._get_camera_object()
                is_training = self.vis["renderingState/isTraining"].read()
                self._check_camera_path_payload(trainer, step)
                run_loop = not is_training
                local_step += 1

    def check_interrupt(self, frame, event, arg):  # pylint: disable=unused-argument
        """Raises interrupt when flag has been set and not already on lowest resolution.
        Used in conjunction with SetTrace.
        """
        if event == "line":
            if self.check_interrupt_vis and not self.camera_moving:
                raise IOChangeException
        return self.check_interrupt

    def _get_camera_object(self):
        """Gets the camera object from the viewer and updates the movement state if it has changed."""
        data = self.vis["renderingState/camera"].read()
        if data is None:
            return None

        camera_object = data["object"]

        if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix):
            self.camera_moving = False
        else:
            self.prev_camera_matrix = camera_object["matrix"]
            self.camera_moving = True

        output_type = self.vis["renderingState/output_choice"].read()
        if output_type is None:
            output_type = OutputTypes.INIT
        if self.prev_output_type != output_type:
            self.camera_moving = True

        colormap_type = self.vis["renderingState/colormap_choice"].read()
        if colormap_type is None:
            colormap_type = ColormapTypes.INIT
        if self.prev_colormap_type != colormap_type:
            self.camera_moving = True

        return camera_object

    def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):
        """Determines which colormap to use based on the set colormap type

        Args:
            outputs: the output tensors for which to apply colormaps
            colors: only set if the colormap is for semantics. Defaults to None.
            eps: epsilon to handle floating point comparisons
        """
        if self.output_list:
            reformatted_output = self._process_invalid_output(self.prev_output_type)

        # default for rgb images
        if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3:
            return outputs[reformatted_output]

        # rendering depth outputs
        if self.prev_colormap_type == ColormapTypes.DEPTH or (
            self.prev_colormap_type == ColormapTypes.DEFAULT
            and outputs[reformatted_output].dtype == torch.float
            and (torch.max(outputs[reformatted_output]) - 1.0) > eps  # handle floating point arithmetic
        ):
            accumulation_str = (
                OutputTypes.ACCUMULATION
                if OutputTypes.ACCUMULATION in self.output_list
                else OutputTypes.ACCUMULATION_FINE
            )
            return colormaps.apply_depth_colormap(outputs[reformatted_output], accumulation=outputs[accumulation_str])

        # rendering accumulation outputs
        if self.prev_colormap_type == ColormapTypes.TURBO or (
            self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float
        ):
            return colormaps.apply_colormap(outputs[reformatted_output])

        # rendering semantic outputs
        if self.prev_colormap_type == ColormapTypes.SEMANTIC or (
            self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.int
        ):
            logits = outputs[reformatted_output]
            labels = torch.argmax(torch.nn.functional.softmax(logits, dim=-1), dim=-1)  # type: ignore
            assert colors is not None
            return colors[labels]

        # rendering boolean outputs
        if self.prev_colormap_type == ColormapTypes.BOOLEAN or (
            self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.bool
        ):
            return colormaps.apply_boolean_colormap(outputs[reformatted_output])

        raise NotImplementedError

    def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torch.Tensor = None, eps=1e-6):
        """Chooses the correct output and sends it to the viewer

        Args:
            outputs: the dictionary of outputs to choose from, from the graph
            colors: only set if the colormap is for semantics. Defaults to None.
            eps: epsilon to handle floating point comparisons
        """
        if self.output_list is None:
            self.output_list = list(outputs.keys())
            viewer_output_list = list(np.copy(self.output_list))
            # remapping rgb_fine -> rgb for all cases just so that we don't have 2 of them in the options
            if OutputTypes.RGB_FINE in self.output_list:
                viewer_output_list.remove(OutputTypes.RGB_FINE)
                viewer_output_list.insert(0, OutputTypes.RGB)
            self.vis["renderingState/output_options"].write(viewer_output_list)

        reformatted_output = self._process_invalid_output(self.prev_output_type)
        # re-register colormaps and send to viewer
        if self.output_type_changed or self.prev_colormap_type == ColormapTypes.INIT:
            self.prev_colormap_type = ColormapTypes.DEFAULT
            colormap_options = [ColormapTypes.DEFAULT]
            if (
                outputs[reformatted_output].shape[-1] != 3
                and outputs[reformatted_output].dtype == torch.float
                and (torch.max(outputs[reformatted_output]) - 1.0) <= eps  # handle floating point arithmetic
            ):
                # accumulation can also include depth
                colormap_options.extend(["depth"])
            self.output_type_changed = False
            self.vis["renderingState/colormap_choice"].write(self.prev_colormap_type)
            self.vis["renderingState/colormap_options"].write(colormap_options)

        selected_output = (self._apply_colormap(outputs, colors) * 255).type(torch.uint8)
        image = selected_output[..., [2, 1, 0]].cpu().numpy()
        data = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 75])[1].tobytes()
        data = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
        self.vis["render_img"].write(data)

    def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None:
        """Calculates and populates all the rendering statistics

        Args:
            render_time: total time spent rendering current view
            num_rays: number of rays rendered
            image_height: height of the current view
            image_width: width of the current view
        """
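The step-throttling arithmetic in update_scene deserves a worked example. With an assumed measured rate of 100,000 train rays/sec and 4,096 rays per batch (illustrative values, not from the dataset):

# Worked example of the throttling arithmetic in update_scene above.
train_rays_per_sec = 100_000   # running average from GLOBAL_BUFFER (assumed value)
num_rays_per_batch = 4_096     # assumed training batch size
static_fps = 1                 # matches ViewerState.static_fps above

batches_per_sec = train_rays_per_sec / num_rays_per_batch   # ~24.4 steps/sec
num_steps = max(int(1 / static_fps * batches_per_sec), 1)   # -> 24
# i.e., with a static camera the viewer re-renders roughly once per second,
# every 24 training steps, instead of on every step.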
writer.put_time(
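The gold next_line is only the opening of a call. Based on the put_time signature shown in the writer snippet above, one plausible completion is the following sketch; the argument values are assumptions, not the dataset's recorded continuation:

writer.put_time(
    name=EventName.ITER_VIS_TIME,  # assumed: the viewer-rendering event from EventName above
    duration=render_time,          # assumed: the render_time parameter of _update_viewer_stats
    step=self.step,
    avg_over_steps=True,           # keyword defaults taken from the put_time signature
)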
7
2023-12-15 20:07:22+00:00
24k
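Taken together, a record like the one above maps naturally to a (prompt, target) pair for repository-level code completion: the retrieved context snippets and the in-file prefix form the prompt, and next_line is the target. A minimal sketch, assuming the record has been parsed into a Python dict with these exact field names; the prompt template itself is an assumption, not the dataset's official one:

from typing import Dict, Tuple


def record_to_example(record: Dict) -> Tuple[str, str]:
    # cross-file snippets retrieved for this completion serve as repo-level context
    context = "\n\n".join(item["snippet"] for item in record["context"])
    # in-file prefix: the import block followed by the code immediately before the hole
    prompt = "\n\n".join([context, record["import_statement"], record["cropped_code"]])
    # the model is evaluated on reproducing the held-out next line
    return prompt, record["next_line"]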
amazon-science/c2f-seg
data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_frames\")\n self.mode = mode\n self.dtype = torch.float32\n self.test_set = subtest\n \n self.data_summary = pickle.load(open(os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_data.pkl\"), \"rb\"))\n self.obj_lists = list(self.data_summary.keys())\n self.device = \"cpu\"\n\n self.seq_len = 32 if self.mode == \"test\" else config.train_seq_len\n\n self.cur_vid = None\n self.video_frames = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def decode2binarymask(self, masks):\n mask = mask_utils.decode(masks)\n binary_masks = mask.astype('bool') # (Image_W,Image_H,128)\n binary_masks = binary_masks.transpose(2,0,1) #(128, Image_W, Image_H)\n return binary_masks\n\n def __len__(self):\n return len(self.obj_lists)\n\n def __getitem__(self, idx):\n v_id, obj_id = self.obj_lists[idx].split(\"_\")\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n fm_crop = []\n fm_no_crop = []\n vm_crop = []\n vm_no_crop = []\n img_crop = []\n \n obj_position = []\n\n counts = []\n loss_mask_weight = []\n\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n obj_dict = self.data_summary[self.obj_lists[idx]]\n timesteps = list(obj_dict.keys())\n assert np.all(np.diff(sorted(timesteps))==1)\n start_t, end_t = min(timesteps), max(timesteps)\n # print(start_t, end_t)\n if self.mode != \"test\" and end_t - start_t > self.seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.seq_len-2))\n end_t = start_t + self.seq_len - 1\n\n if self.mode == \"test\":\n if start_t + self.seq_len-1<=end_t:\n end_t = start_t + self.seq_len-1\n\n for t_step in range(start_t, end_t):\n image_path = os.path.join(self.img_path, v_id, str(t_step).zfill(5)+'.png')\n img = cv2.imread(image_path)[:,:,::-1]\n # get visible mask and full mask\n vm = self.decode2binarymask(obj_dict[t_step][\"VM\"])[0]\n fm = self.decode2binarymask(obj_dict[t_step][\"FM\"])[0] # 320, 480\n vx_min, vx_max, vy_min, vy_max = obj_dict[t_step][\"VM_bx\"]\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(320, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(480, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1])\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1])\n img_crop.append(img[vx_min:vx_max+1, vy_min:vy_max+1])\n\n vm_no_crop.append(vm)\n fm_no_crop.append(fm)\n # get loss mask\n loss_mask_weight.append(self.decode2binarymask(obj_dict[t_step][\"loss_mask_weight\"])[0])\n\n # for evaluation\n video_ids.append(int(v_id))\n object_ids.append(int(obj_id))\n frame_ids.append(t_step)\n counts.append(1)\n \n if True:\n num_pad = self.seq_len - (end_t - start_t)\n for _ in range(num_pad):\n obj_position.append(copy.deepcopy(obj_position[-1]))\n\n fm_crop.append(copy.deepcopy(fm_crop[-1]))\n fm_no_crop.append(copy.deepcopy(fm_no_crop[-1]))\n vm_crop.append(copy.deepcopy(vm_crop[-1]))\n vm_no_crop.append(copy.deepcopy(vm_no_crop[-1]))\n 
img_crop.append(copy.deepcopy(img_crop[-1]))\n\n loss_mask_weight.append(copy.deepcopy(loss_mask_weight[-1]))\n \n video_ids.append(video_ids[-1])\n object_ids.append(object_ids[-1])\n frame_ids.append(frame_ids[-1] + 1)\n counts.append(0)\n \n vm_crop, vm_crop_gt, fm_crop, img_crop, vm_pad, vm_scale = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_crop_gt = np.stack(vm_crop_gt, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(np.array(vm_crop_gt)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"img_crop\": img_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n \"counts\": counts,\n \"loss_mask\": loss_mask_weight, \n \"obj_position\": obj_position,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop_vm=None, img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_scale = []\n vm_crop_gt = []\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n vm_crop_gt.append(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n\n for i, m in enumerate(fm_crop_vm):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop_vm[i] = m\n\n for i, img_ in enumerate(img_crop):\n img_ = transform.rescale(img_, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n 
img_crop[i] = img_\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, vm_crop_gt, fm_crop_vm, img_crop, vm_pad, vm_scale\n \n def getImg(self, v_id):\n imgs = []\n imgs_list = os.listdir(os.path.join(self.img_path, v_id))\n imgs_list.sort()\n for sub_path in imgs_list:\n img_path = os.path.join(self.img_path, v_id, sub_path)\n img_tmp = plt.imread(img_path)\n imgs.append(img_tmp)\n assert len(imgs) == 128\n return imgs\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n for item in sample_loader:\n yield item\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "MOViD_A", "path": "data/dataloader_MOViD_A.py", "snippet": "class MOViD_A(object):\n def __init__(self, config, mode):\n super(MOViD_A, self).__init__()\n self.mode = mode\n self.dtype = torch.float32\n self.device = \"cpu\"\n root_path = config.root_path\n self.data_dir = os.path.join(root_path, mode)\n \n self.instance_list = np.genfromtxt(\n os.path.join(root_path, \"{}_instance.txt\".format(mode)),\n dtype=np.str,\n encoding='utf-8'\n )\n\n self.train_seq_len = 24\n self.cur_vid = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def __len__(self):\n return len(self.instance_list)\n\n def __getitem__(self, idx, specified_V_O_id=None):\n # whether choose a specific instance to load\n if specified_V_O_id is None:\n v_id, obj_id, value = self.instance_list[idx].split(\"_\")\n else:\n v_id, obj_id, value = specified_V_O_id.split(\"_\")\n v_id, obj_id, value = int(v_id), int(obj_id), int(value)\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n self.video_path = os.path.join(self.data_dir, str(v_id))\n metadata = self.read_json(os.path.join(self.video_path, 'metadata.json'))\n\n self.num_frames = metadata[\"metadata\"][\"num_frames\"]\n self.height = metadata['metadata']['height']\n self.width = metadata['metadata']['width']\n self.instances = [self.format_instance_information(obj) for obj in metadata[\"instances\"]]\n\n vis_mask_paths = [os.path.join(self.video_path, \"segmentation_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n vis_mask = [np.array(Image.open(frame_path)) for frame_path in vis_mask_paths] #[t,h,w]\n\n full_mask_paths = [os.path.join(self.video_path, \"segmentation_{}_{}.png\".format(obj_id, str(f).zfill(5))) for f in range(self.num_frames)]\n full_mask = [np.array(Image.open(frame_path)) for frame_path in full_mask_paths] #[t,h,w]\n \n rgb_img_path = [os.path.join(self.video_path, 
\"rgba_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n rgb_img = [np.array(Image.open(frame_path))[...,:3] for frame_path in rgb_img_path]\n \n counts = []\n obj_position = []\n\n vm_crop = []\n vm_no_crop = []\n fm_crop = []\n fm_no_crop = []\n loss_mask_weight = []\n img_crop = []\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n timesteps = self.instances[obj_id]['bbox_frames']\n start_t, end_t = 0, 23\n if self.mode != \"test\" and end_t - start_t > self.train_seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.train_seq_len-2))\n end_t = start_t + self.train_seq_len - 1\n\n for t_step in range(start_t, end_t+1):\n Image_H, Image_W = self.height, self.width\n # some objects will move out the field of view in some frames\n if t_step in timesteps:\n index = self.instances[obj_id][\"bbox_frames\"].index(t_step)\n xmin, ymin, xmax, ymax = self.instances[obj_id][\"bboxes\"][index]\n vx_min, vy_min, vx_max, vy_max = int(Image_H*xmin), int(Image_W*ymin), int(Image_H*xmax), int(Image_W*ymax)\n counts.append(1)\n else:\n bboxs = mask_find_bboxs(full_mask[t_step].astype(np.uint8))\n \n if bboxs.size==0:\n vx_min, vy_min, vx_max, vy_max = 0, 0, 256, 256\n else:\n b = bboxs[-1][:4]\n vx_min, vy_min, vx_max, vy_max = b[1], b[0], b[1]+b[3], b[0]+b[2]\n counts.append(0)\n\n # enlarge the bbox\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(Image_H, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(Image_W, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n\n # get mask\n vm = vis_mask[t_step]\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n vm_no_crop.append(vm==value)\n\n fm = full_mask[t_step]\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n fm_no_crop.append(fm==value)\n \n # get image\n image = rgb_img[t_step]\n img_crop.append(image[vx_min:vx_max+1, vy_min:vy_max+1])\n\n # get loss mask\n fore_ground = vm == 0\n obj_ground = vm==value\n loss_mask = np.logical_or(fore_ground, obj_ground)\n\n loss_mask_weight.append(loss_mask)\n\n # for evaluation\n video_ids.append(v_id)\n object_ids.append(obj_id)\n frame_ids.append(t_step)\n\n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n \n vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt, img_crop = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n # fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n img_crop = np.stack(img_crop, axis=0) # Sqe_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = 
torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_no_crop\": vm_no_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n\n \"img_crop\": img_crop,\n \n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n\n \"obj_position\": obj_position, \n \"loss_mask\": loss_mask_weight, \n \"counts\": counts,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop=None,img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_crop_gt = []\n vm_scale = []\n for i, img in enumerate(img_crop):\n img = transform.rescale(img, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img = np.pad(img, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop[i] = img\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n\n for i, m in enumerate(fm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop[i] = m\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt,img_crop\n \n def read_json(self,dir_):\n with open(dir_) as f:\n data = json.load(f)\n return data\n\n def format_instance_information(self, obj):\n return {\n \"bboxes\": obj[\"bboxes\"],\n \"bbox_frames\": obj[\"bbox_frames\"],\n }\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 
1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "Kins_Fusion_dataset", "path": "data/dataloader_KINS.py", "snippet": "class Kins_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(Kins_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode),\"image_2\")\n \n # Load the GT of AISFormer\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n \n # Load the GT of vanilla KINS\n self.base_img_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.base_ann_path= os.path.join(self.root_path, \"update_{}_2020.json\".format(mode))\n annotations = cvb.load(self.base_ann_path)\n imgs_info = annotations['images']\n anns_info = annotations[\"annotations\"]\n self.imgs_dict, self.anns_dict = self.make_json_dict(imgs_info, anns_info)\n\n # dataloader setting\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # load aisformer predicted visible masks\n if \"aisformer\" in self.label_info[index]:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = np.array(Image.open(img_path))\n instances = self.data_info['{}_{}'.format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n fm_no_crop = fm_no_crop[..., np.newaxis]\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, 
y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n \n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(np.array(vm_no_crop_gt)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": 
vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n return meta\n else:\n img_id, anno_id, category_id = self.label_info[index].split(\"_\")\n img_id, anno_id, category_id = int(img_id), int(anno_id), int(category_id)\n\n img_name = self.imgs_dict[img_id]\n img_path = os.path.join(self.base_img_path, img_name)\n \n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n \n ann = self.anns_dict[img_id][anno_id]\n fm_no_crop = self.polys_to_mask(ann[\"a_segm\"], height, width)\n vm_no_crop = self.polys_to_mask(ann[\"i_segm\"], height, width)\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"i_bbox\"]\n\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n # vm_crop here is the GT\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = 
torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n return meta\n\n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.55:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.55 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" }, { "identifier": "KINS_Aisformer_VRSP_Intersection", "path": "data/dataloader_KINS.py", "snippet": "class KINS_Aisformer_VRSP_Intersection(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(KINS_Aisformer_VRSP_Intersection, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Intersection dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"kins_intersection.pkl\"), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"kins_intersection_list.txt\"), dtype=np.str, encoding='utf-8')\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = 
cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def mask_find_bboxs(self, mask):\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)\n stats = stats[stats[:,4].argsort()]\n return stats\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n instances = self.data_info[image_id][anno_id]\n\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = Image.open(img_path)\n img = img.resize((width,height), Image.ANTIALIAS)\n img = np.array(img)\n \n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n # fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n \n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n\n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n \n fm_no_crop = fm_no_crop[..., np.newaxis]\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n \n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, 
max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n refine_loss_mask = 1 - (vm_crop_gt==vm_crop).astype(bool)\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n # import pdb;pdb.set_trace()\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(vm_no_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n refine_loss_mask = torch.from_numpy(np.array(refine_loss_mask)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n # elif self.mode==\"test\":\n # meta = {\n # # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # # \"vm_no_crop_gt\": vm_no_crop_gt,\n # # \"refine_loss_mask\": refine_loss_mask,\n # # \"fm_no_crop\": fm_no_crop,\n # \"fm_crop\": fm_crop,\n # \"img_crop\": img_crop,\n # # \"loss_mask\": loss_mask,\n # # \"obj_position\": obj_position,\n # # \"vm_pad\": vm_pad,\n # # \"vm_scale\": vm_scale,\n # # \"counts\":counts,\n # # \"img_id\": image_id,\n # # \"anno_id\": 
anno_id,\n # # # for vq\n # # # \"mask_crop\": fm_crop\n # # # \"img\":img,\n # }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img\":img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_Fusion_dataset", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset \n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if mode==\"train\":\n train_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_train2014_with_classes.json\"))\n self.anns_dict = train_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif mode==\"test\":\n val_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_val2014_with_classes.json\"))\n self.anns_dict = val_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n \n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # predicted vm\n if len(self.label_info[index].split(\",\"))==3:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, 
np.newaxis], 3, axis=2)\n instances = self.data_info[\"{}_{}\".format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n # occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n # import pdb;pdb.set_trace()\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n # if self.mode==\"test\":\n # loss_mask = mask_utils.decode([instances[\"loss_mask\"]]).astype(bool)[...,0]\n # else:\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = 
torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"img_crop\": img_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # \"occlude_rate\":occlude_rate\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n return meta\n # gt vm\n elif len(self.label_info[index].split(\",\"))==2:\n anno_id, img_path = self.label_info[index].split(\",\")\n anno_id = int(anno_id)\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n\n ann = self.anns_dict[anno_id]\n img_id = ann[\"image_id\"]\n # category_id = ann[\"category_id\"]\n\n full_mask = ann[\"segmentation\"]\n fm_no_crop = mask_utils.decode(full_mask)[...,np.newaxis]\n\n visible_mask = ann[\"visible_mask\"]\n vm_no_crop = mask_utils.decode(visible_mask)[...,np.newaxis]\n\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"bbox\"]\n y_max, x_max = y_min + w, x_min + h\n y_min, x_min, y_max, x_max = int(y_min), int(x_min), int(y_max), int(x_max) \n\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, 
max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n # full_pad = ((0, max(375-height, 0)), (0, max(1242-width, 0)))\n # vm_no_crop = np.pad(vm_no_crop, full_pad)[:375, :1242]\n # fm_no_crop = np.pad(fm_no_crop, full_pad)[:375, :1242]\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(img_crop).to(self.dtype).to(self.device)\n img = torch.from_numpy(img).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n \n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\": counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img_no_crop\": img,\n }\n return meta\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n 
dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n\n # def data_augmentation(self, mask):\n # return mask\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.9:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.6:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.6 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_VRSP", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_VRSP(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_VRSP, self).__init__()\n self.config = config\n self.mode = mode\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if self.mode==\"train\":\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif self.mode==\"test\":\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, np.newaxis], 3, axis=2)\n instances = self.data_info[image_id][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = 
(x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = 
torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"occlude_rate\":occlude_rate,\n # # for vq\n # \"mask_crop\": fm_crop,\n \"img\": img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" } ]
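The context snippets for this record end here. Two routines recur across every dataloader in them: the crop -> rescale -> pad step that normalizes each object crop to a fixed 256x256 patch, and the random mask perturbation used as training-time augmentation. A minimal sketch of both, assuming numpy, OpenCV, and skimage as imported in the snippets; parameter names mirror the originals, and the exact branch probabilities vary slightly per dataset (0.2/0.6/0.55/0.1 for KINS, 0.2/0.9/0.6/0.2 for COCOA):

import random

import cv2
import numpy as np
from skimage import transform

def resize_and_pad(mask, patch_h=256, patch_w=256):
    # Rescale a crop toward (patch_h, patch_w), then zero-pad the
    # bottom/right edges so the output is exactly (patch_h, patch_w),
    # mirroring the vm_crop/fm_crop handling in the dataloaders above.
    h, w = mask.shape[:2]
    m = transform.rescale(mask.astype(float), (patch_h / h, patch_w / w))
    cur_h, cur_w = m.shape[:2]
    to_pad = ((0, max(patch_h - cur_h, 0)), (0, max(patch_w - cur_w, 0)))
    return np.pad(m, to_pad)[:patch_h, :patch_w][np.newaxis, ...]

def perturb_mask(mask, n_max=4):
    # Randomly blur, dilate, or erode a binary mask so the model sees
    # imperfect visible masks at train time, as in data_augmentation.
    mask = mask.astype(np.float64)  # np.float is removed in NumPy >= 1.24
    rdv = random.random()
    if rdv <= 0.2:
        mask = cv2.GaussianBlur(mask, (35, 35), 11)
    elif rdv < 0.6:
        grow = random.random() <= 0.55  # drawn once: dilate vs. erode
        for _ in range(random.randint(1, n_max)):
            kernel = np.ones((random.randint(5, 13), random.randint(5, 13)),
                             dtype=np.uint8)
            mask = cv2.dilate(mask, kernel) if grow else cv2.erode(mask, kernel)
        if random.random() <= 0.1:
            mask = cv2.GaussianBlur(mask, (35, 35), 11)
    return mask > 0.5  # re-binarize

The original classes also return vm_pad and vm_scale alongside each patch, so predictions on the 256x256 crop can later be mapped back to full-image coordinates.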
from data.dataloader_Fishbowl import FishBowl from data.dataloader_MOViD_A import MOViD_A from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21265
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A":
def load_dataset(config, args, mode): if mode=="train": if args.dataset=="KINS": train_dataset = Kins_Fusion_dataset(config, mode='train') test_dataset = Kins_Fusion_dataset(config, mode='test') elif args.dataset=="COCOA": train_dataset = COCOA_Fusion_dataset(config, mode='train') test_dataset = COCOA_Fusion_dataset(config, mode='test') elif args.dataset=="Fishbowl": train_dataset = FishBowl(config, mode='train') test_dataset = FishBowl(config, mode='test') elif args.dataset=="MOViD_A":
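The load_dataset helper above dispatches on args.dataset to build a train/test pair, and each dataset class ships its own collate_fn (a staticmethod that drops None entries before default_collate). A usage sketch, under the assumption that the function returns the (train_dataset, test_dataset) pair it constructs -- the excerpt is cut before its return statement:

from torch.utils.data import DataLoader

def build_loaders(config, args, batch_size=4):
    # Assumes load_dataset(...) returns (train_dataset, test_dataset).
    train_dataset, test_dataset = load_dataset(config, args, mode="train")
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              shuffle=True, drop_last=True,
                              collate_fn=train_dataset.collate_fn)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False,
                             collate_fn=test_dataset.collate_fn)
    return train_loader, test_loader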
train_dataset = MOViD_A(config, mode='train')
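Taken together, the fields above form one repo-level completion example: cropped_code stops at the elif args.dataset=="MOViD_A": branch, and the line just above is the gold continuation (the gold_snippet_index value below identifies which context snippet supplies it). A minimal scoring sketch, assuming records are plain dicts; model_complete is a hypothetical stand-in for whatever model is being evaluated, not a real API:

def exact_match(record, model_complete):
    # `record` holds the fields shown in this dump; `model_complete` is a
    # hypothetical callable: (context snippets, cropped code) -> predicted line.
    prediction = model_complete(record["context"], record["cropped_code"])
    return prediction.strip() == record["next_line"].strip()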
1
2023-12-21 04:25:47+00:00
24k
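That closes the first record. A minimal sketch for iterating records with this eleven-field schema, assuming the dump is serialized as JSON lines -- an assumption about the on-disk format, not something this excerpt confirms:

import json

FIELDS = ("repo_name", "file_path", "context", "import_statement",
          "token_num", "cropped_code", "all_code", "next_line",
          "gold_snippet_index", "created_at", "level")

def read_records(path):
    # Yields one dict per line; raises if a record is missing a field.
    with open(path, encoding="utf-8") as f:
        for line in f:
            rec = json.loads(line)
            missing = [k for k in FIELDS if k not in rec]
            if missing:
                raise KeyError(f"missing fields: {missing}")
            yield rec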
alipay/PainlessInferenceAcceleration
pia/lookahead/models/qwen/modeling_qwen.py
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = False\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n def _get_generation_mode(\n self, generation_config: GenerationConfig, assistant_model: Optional[\"PreTrainedModel\"]\n ) -> GenerationMode:\n \"\"\"\n Returns the generation mode triggered by a [`GenerationConfig`] instance.\n \"\"\"\n if generation_config.constraints is not None or generation_config.force_words_ids is not None:\n generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH\n elif generation_config.num_beams == 1:\n if generation_config.do_sample is False:\n if (\n generation_config.top_k is not None\n and generation_config.top_k > 1\n and generation_config.penalty_alpha is not None\n and generation_config.penalty_alpha > 0\n ):\n generation_mode = GenerationMode.CONTRASTIVE_SEARCH\n elif generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.GREEDY_SEARCH\n else:\n if generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.SAMPLE\n else:\n if generation_config.num_beam_groups > 1:\n generation_mode = GenerationMode.GROUP_BEAM_SEARCH\n elif generation_config.do_sample is True:\n generation_mode = GenerationMode.BEAM_SAMPLE\n else:\n generation_mode = GenerationMode.BEAM_SEARCH\n\n # Assisted generation may extend some generation modes\n if assistant_model is not None:\n if generation_mode in (\"greedy_search\", \"sample\"):\n generation_mode = GenerationMode.ASSISTED_GENERATION\n else:\n raise ValueError(\n \"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate \"\n \"is only supported with Greedy Search and Sample.\"\n )\n return generation_mode\n\n @torch.no_grad()\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n r\"\"\"\n\n Generates sequences of token ids for models with a language modeling head.\n\n <Tip warning={true}>\n\n Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the\n model's default generation configuration. You can override any `generation_config` by passing the corresponding\n parameters to generate(), e.g. 
`.generate(inputs, num_beams=4, do_sample=True)`.\n\n For an overview of generation strategies and code examples, check out the [following\n guide](../generation_strategies).\n\n </Tip>\n\n Parameters:\n inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):\n The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the\n method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`\n should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of\n `input_ids`, `input_values`, `input_features`, or `pixel_values`.\n generation_config (`~generation.GenerationConfig`, *optional*):\n The generation configuration to be used as base parametrization for the generation call. `**kwargs`\n passed to generate matching the attributes of `generation_config` will override them. If\n `generation_config` is not provided, the default will be used, which had the following loading\n priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model\n configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s\n default values, whose documentation should be checked to parameterize generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n Custom logits processors that complement the default logits processors built from arguments and\n generation config. If a logit processor is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n Custom stopping criteria that complement the default stopping criteria built from arguments and a\n generation config. If a stopping criteria is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):\n If provided, this function constraints the beam search to allowed tokens only at each step. If not\n provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and\n `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned\n on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful\n for constrained generation conditioned on the prefix, as described in [Autoregressive Entity\n Retrieval](https://arxiv.org/abs/2010.00904).\n synced_gpus (`bool`, *optional*):\n Whether to continue running the while loop until max_length. Unless overridden this flag will be set to\n `True` under DeepSpeed ZeRO Stage 3 multiple GPUs environment to avoid hanging if one GPU finished\n generating before other GPUs. Otherwise it'll be set to `False`.\n assistant_model (`PreTrainedModel`, *optional*):\n An assistant model that can be used to accelerate generation. The assistant model must have the exact\n same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistent model\n is much faster than running generation with the model you're calling generate from. As such, the\n assistant model should be much smaller.\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. 
Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n kwargs (`Dict[str, Any]`, *optional*):\n Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be\n forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder\n specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.\n\n Return:\n [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`\n or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.\n\n If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchDecoderOnlyOutput`],\n - [`~generation.SampleDecoderOnlyOutput`],\n - [`~generation.BeamSearchDecoderOnlyOutput`],\n - [`~generation.BeamSampleDecoderOnlyOutput`]\n\n If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchEncoderDecoderOutput`],\n - [`~generation.SampleEncoderDecoderOutput`],\n - [`~generation.BeamSearchEncoderDecoderOutput`],\n - [`~generation.BeamSampleEncoderDecoderOutput`]\n \"\"\"\n\n if synced_gpus is None:\n # if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:\n # synced_gpus = True\n # else:\n # synced_gpus = False\n synced_gpus = False\n\n # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call\n self._validate_model_class()\n\n # priority: `generation_config` argument > `model.generation_config` (the default generation config)\n if generation_config is None:\n # legacy: users may modify the model configuration to control generation -- update the generation config\n # model attribute accordingly, if it was created from the model config\n if self.generation_config._from_model_config:\n new_generation_config = GenerationConfig.from_model_config(self.config)\n if new_generation_config != self.generation_config:\n # warnings.warn(\n # \"You have modified the pretrained model configuration to control generation. This is a\"\n # \" deprecated strategy to control generation and will be removed soon, in a future version.\"\n # \" Please use a generation configuration file (see\"\n # \" https://huggingface.co/docs/transformers/main_classes/text_generation )\"\n # )\n self.generation_config = new_generation_config\n generation_config = self.generation_config\n\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs\n generation_config.validate()\n self._validate_model_kwargs(model_kwargs.copy())\n if not hasattr(generation_config, 'decoding_kwargs'):\n generation_config.decoding_kwargs = model_kwargs.get('decoding_kwargs', {})\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:\n if model_kwargs.get(\"attention_mask\", None) is None:\n logger.warning(\n \"The attention mask and the pad token id were not set. As a consequence, you may observe \"\n \"unexpected behavior. 
Please pass your input's `attention_mask` to obtain reliable results.\"\n )\n eos_token_id = generation_config.eos_token_id\n if isinstance(eos_token_id, list):\n eos_token_id = eos_token_id[0]\n logger.warning(f\"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.\")\n generation_config.pad_token_id = eos_token_id\n\n # 3. Define model inputs\n # inputs_tensor has to be defined\n # model_input_name is defined if model-specific keyword input is passed\n # otherwise model_input_name is None\n # all model-specific keyword inputs are removed from `model_kwargs`\n inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(\n inputs, generation_config.bos_token_id, model_kwargs\n )\n batch_size = inputs_tensor.shape[0]\n\n # 4. Define other model kwargs\n model_kwargs[\"output_attentions\"] = generation_config.output_attentions\n model_kwargs[\"output_hidden_states\"] = generation_config.output_hidden_states\n # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are\n # generating the first new token or not, and we only want to use the embeddings for the first new token)\n if not self.config.is_encoder_decoder and model_input_name == \"inputs_embeds\":\n model_kwargs[\"use_cache\"] = True\n else:\n model_kwargs[\"use_cache\"] = generation_config.use_cache\n\n accepts_attention_mask = \"attention_mask\" in set(inspect.signature(self.forward).parameters.keys())\n requires_attention_mask = \"encoder_outputs\" not in model_kwargs\n\n if model_kwargs.get(\"attention_mask\", None) is None and requires_attention_mask and accepts_attention_mask:\n model_kwargs[\"attention_mask\"] = self._prepare_attention_mask_for_generation(\n inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id\n )\n\n # decoder-only models should use left-padding for generation\n if not self.config.is_encoder_decoder:\n # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`\n # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.\n if (\n generation_config.pad_token_id is not None\n and len(inputs_tensor.shape) == 2\n and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0\n ):\n logger.warning(\n \"A decoder-only architecture is being used, but right-padding was detected! For correct \"\n \"generation results, please set `padding_side='left'` when initializing the tokenizer.\"\n )\n\n if self.config.is_encoder_decoder and \"encoder_outputs\" not in model_kwargs:\n # if model is encoder decoder encoder_outputs are created\n # and added to `model_kwargs`\n model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, model_kwargs, model_input_name\n )\n\n # 5. Prepare `input_ids` which will be used for auto-regressive generation\n if self.config.is_encoder_decoder:\n input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(\n batch_size=batch_size,\n model_input_name=model_input_name,\n model_kwargs=model_kwargs,\n decoder_start_token_id=generation_config.decoder_start_token_id,\n bos_token_id=generation_config.bos_token_id,\n device=inputs_tensor.device,\n )\n else:\n input_ids = inputs_tensor if model_input_name == \"input_ids\" else model_kwargs.pop(\"input_ids\")\n\n if streamer is not None:\n streamer.put(input_ids.cpu())\n\n # 6. 
Prepare `max_length` depending on other stopping criteria.\n input_ids_length = input_ids.shape[-1]\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if generation_config.max_new_tokens is not None:\n if not has_default_max_length:\n logger.warning(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n )\n generation_config.max_length = generation_config.max_new_tokens + input_ids_length\n\n # 7. determine generation mode\n generation_mode = self._get_generation_mode(generation_config, assistant_model)\n\n if streamer is not None and (generation_config.num_beams > 1):\n raise ValueError(\n \"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1.\"\n )\n\n if self.device.type != input_ids.device.type:\n warnings.warn(\n \"You are calling .generate() with the `input_ids` being on a device type different\"\n f\" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model\"\n f\" is on {self.device.type}. You may experience unexpected behaviors or slower generation.\"\n \" Please make sure that you have put `input_ids` to the\"\n f\" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before\"\n \" running `.generate()`.\",\n UserWarning,\n )\n\n # 8. prepare distribution pre_processing samplers\n logits_processor = self._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_length,\n encoder_input_ids=inputs_tensor,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n # 9. prepare stopping criteria\n stopping_criteria = self._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n\n decoding_kwargs = generation_config.decoding_kwargs if hasattr(generation_config, 'decoding_kwargs') else {}\n decoding_kwargs['generation_mode'] = generation_mode\n decoding_kwargs['do_sample'] = generation_config.do_sample\n decoding_kwargs['inputs_embeds_position'] = generation_config.inputs_embeds_position if hasattr(generation_config, 'inputs_embeds_position') else 0\n decoding_kwargs['max_length'] = generation_config.max_length\n if generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n decoding_kwargs['decoding_max_length'] = generation_config.max_length + decoding_length + 1\n else:\n decoding_kwargs['decoding_max_length'] = generation_config.max_length\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n\n # 10. go into different generation modes\n if generation_mode == GenerationMode.ASSISTED_GENERATION:\n if generation_config.num_return_sequences > 1:\n raise ValueError(\n \"num_return_sequences has to be 1 when doing assisted generate, \"\n f\"but is {generation_config.num_return_sequences}.\"\n )\n if batch_size > 1:\n raise ValueError(\"assisted generate is only supported for batch_size = 1\")\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"assisted generate requires `use_cache=True`\")\n\n # 11. 
If the assistant model is an encoder-decoder, prepare its encoder outputs\n if assistant_model.config.is_encoder_decoder:\n assistant_model_kwargs = copy.deepcopy(model_kwargs)\n inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs(\n inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs\n )\n assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, assistant_model_kwargs, model_input_name\n )\n model_kwargs[\"assistant_encoder_outputs\"] = assistant_model_kwargs[\"encoder_outputs\"]\n\n # 12. run assisted generate\n return self.assisted_decoding(\n input_ids,\n assistant_model=assistant_model,\n do_sample=generation_config.do_sample,\n logits_processor=logits_processor,\n logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n if generation_mode == GenerationMode.GREEDY_SEARCH:\n # 11. run greedy search\n return self.greedy_search(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n # 11. run greedy search\n return self.lookahead_generation(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"Contrastive search requires `use_cache=True`\")\n\n return self.contrastive_search(\n input_ids,\n top_k=generation_config.top_k,\n penalty_alpha=generation_config.penalty_alpha,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n sequential=generation_config.low_memory,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. expand input_ids with `num_return_sequences` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_return_sequences,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 13. 
run sample\n return self.sample(\n input_ids,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n\n # 13. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 14. run beam sample\n return self.beam_sample(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n num_beam_groups=generation_config.num_beam_groups,\n max_length=generation_config.max_length,\n )\n # 12. 
interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.group_beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:\n final_constraints = []\n if generation_config.constraints is not None:\n final_constraints = generation_config.constraints\n\n if generation_config.force_words_ids is not None:\n\n def typeerror():\n raise ValueError(\n \"`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`\"\n f\"of positive integers, but is {generation_config.force_words_ids}.\"\n )\n\n if (\n not isinstance(generation_config.force_words_ids, list)\n or len(generation_config.force_words_ids) == 0\n ):\n typeerror()\n\n for word_ids in generation_config.force_words_ids:\n if isinstance(word_ids[0], list):\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any(not isinstance(token_ids, list) for token_ids in word_ids):\n typeerror()\n if any(\n any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)\n for token_ids in word_ids\n ):\n typeerror()\n\n constraint = DisjunctiveConstraint(word_ids)\n else:\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):\n typeerror()\n\n constraint = PhrasalConstraint(word_ids)\n final_constraints.append(constraint)\n\n # 11. prepare beam search scorer\n constrained_beam_scorer = ConstrainedBeamSearchScorer(\n constraints=final_constraints,\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. 
run beam search\n return self.constrained_beam_search(\n input_ids,\n constrained_beam_scorer=constrained_beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n def lookahead_prepare_inputs_for_generation(self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs):\n position_ids = kwargs.get(\"position_ids\", None)\n\n decoding_kwargs = kwargs.get('decoding_kwargs', {})\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n branch_length = decoding_kwargs.get('branch_length', 12)\n decoding_mode = decoding_kwargs.get('decoding_mode', 'hier')\n max_length = decoding_kwargs.get('max_length', 2048)\n update_branch_length = min(branch_length, max_length - input_ids.size(-1))\n assert update_branch_length > 0, f'{branch_length=} {max_length=} {input_ids.size(-1)=} {update_branch_length=}'\n\n if past_key_values is None:\n if inputs_embeds is not None and input_ids is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds, \"input_ids\": input_ids}\n length = input_ids.size(1)\n elif input_ids is not None:\n model_inputs = {\"input_ids\": input_ids}\n length = input_ids.size(1)\n elif inputs_embeds is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n length = input_ids.size(1)\n else:\n raise ValueError('either input_ids or inputs_embeds is not None')\n update_attention_mask = attention_mask[:, :, :length, :length]\n\n model_inputs.update(\n {\"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": update_attention_mask,\n \"decoding_kwargs\": decoding_kwargs\n })\n\n if position_ids is not None:\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, encoding=True, length=length)\n\n else:\n decoding_qids = input_ids[0, -2:].tolist()\n # decoding_qids = decoding_kwargs['input_id_list'][0][-2:]\n min_input_size = 0\n min_output_size = max(decoding_length // 2, 1)\n\n if decoding_mode in ('hier', 'par', 'one'):\n decoding_mode = decoding_mode + '_mix'\n fmt, mode = decoding_mode.split('_')\n method_name = fmt + '_get'\n\n decoding_ids, decoding_masks, sizes = getattr(self.lookahead_cache, method_name)(decoding_qids,\n decoding_length=decoding_length,\n branch_length=update_branch_length,\n min_input_size=min_input_size,\n min_output_size=min_output_size,\n mode=mode,\n idx=0)\n\n decoding_input_ids = torch.tensor([decoding_ids], dtype=torch.long, device=input_ids.device)\n prefix_length = input_ids.size(-1) - 1\n fresh_length = len(decoding_ids)\n ppl = prefix_length + fresh_length\n assert ppl <= attention_mask.size(2), \\\n f'{max_length=} {update_branch_length=} {prefix_length=} {fresh_length=} {attention_mask.shape=}'\n prefix_mask_tensor = attention_mask[:, :, prefix_length:ppl, :prefix_length]\n decoding_mask_tensor = torch.from_numpy(decoding_masks[None, None]).to(\n dtype=attention_mask.dtype, device=attention_mask.device)\n decoding_attention_mask = torch.cat([prefix_mask_tensor, decoding_mask_tensor], dim=3)\n\n decoding_kwargs.update({'decoding_qids': decoding_qids,\n 'decoding_ids': decoding_ids,\n 'decoding_masks': decoding_masks,\n 'sizes': sizes,\n })\n model_inputs = {'decoding_kwargs': decoding_kwargs}\n\n model_inputs.update(\n {\n \"input_ids\": 
decoding_input_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": decoding_attention_mask\n }\n )\n if position_ids is not None:\n indices = torch.sum(decoding_attention_mask, dim=3).squeeze(1)[0]\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, indices=indices, encoding=False)\n\n return model_inputs\n\n def _get_position_ids(self, full_position_ids, indices=None, length=None, encoding=True):\n if encoding:\n return full_position_ids[..., :length]\n else:\n return full_position_ids[..., indices]\n\n def _lookahead_update_model_kwargs_for_generation(\n self,\n outputs: ModelOutput,\n model_kwargs: Dict[str, Any],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n logits_processor: Optional[LogitsProcessorList] = None,\n input_ids: Optional[torch.Tensor] = None,\n ) -> Dict[str, Any]:\n # update past_key_values\n model_kwargs[\"past_key_values\"] = self._extract_past_from_model_output(\n outputs, standardize_cache_format=standardize_cache_format\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_ids = decoding_kwargs.get('decoding_ids', [])\n if len(decoding_ids) <= 1:\n next_token_logits = outputs.logits[:, -1:, :]\n # pre-process distribution\n # next_tokens_scores = logits_processor(input_ids, next_token_logits)\n bs, nt, nv = next_token_logits.shape\n next_tokens_scores = logits_processor(input_ids, next_token_logits.squeeze(1)).unsqueeze(1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_tokens_scores'] = next_tokens_scores\n next_token_list = next_tokens.tolist()\n model_kwargs['next_token_list'] = next_token_list\n decoding_kwargs['input_id_list'][0].extend(next_token_list[0])\n decoding_kwargs['dls'].append(1)\n decoding_kwargs['edls'].append(1)\n if decoding_kwargs.get('debug_lookahead', False):\n decoding_qids = decoding_kwargs.get('decoding_qids', [])\n print(f'size:0 query:{decoding_qids} next_token:{next_token_list[0]}')\n else:\n # TODO: accurate logit_processor\n # next_tokens_scores = logits_processor(input_ids, outputs.logits)\n bs, nt, nv = outputs.logits.shape\n next_tokens_scores = logits_processor(input_ids.repeat(1, nt).view(bs * nt, -1),\n outputs.logits.view(bs * nt, -1)).view(bs, nt, -1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n bs, nt, nv = probs.shape\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n\n next_token_list = next_tokens.tolist()[0]\n decoding_ids = decoding_kwargs['decoding_ids'][1:]\n decoding_mask = decoding_kwargs['decoding_masks']\n sizes = decoding_kwargs['sizes']\n\n max_match_index = 0\n max_match_count = 0\n max_decoding_ids_slice = None\n max_next_token_slice = None\n \n for i in range(len(decoding_ids)):\n mask_indices = np.nonzero(decoding_mask[i + 1, 1:])[0]\n decoding_ids_slice = [decoding_ids[j] for j in mask_indices] \n next_token_slice = [next_token_list[0]] + [next_token_list[j + 1] for j in mask_indices]\n \n c = len(decoding_ids_slice)\n for j, p in enumerate(decoding_ids_slice):\n if next_token_slice[j] != p:\n c = j\n 
break\n if c > max_match_count:\n max_match_count = c\n max_match_index = i\n if c >= max_match_count:\n max_decoding_ids_slice = decoding_ids_slice\n max_next_token_slice = next_token_slice\n # if decoding_kwargs['eos'] in decoding_ids:\n # max_match_count = 0\n\n prefix_plus_count = input_ids.size(-1)\n match_idx = np.nonzero(decoding_mask[max_match_index + 1, 1:])[0][:max_match_count]\n if len(decoding_ids) != max_match_count:\n past = model_kwargs[\"past_key_values\"]\n device = past[0][0].device\n kv_idx = torch.tensor(match_idx + prefix_plus_count, dtype=torch.long, device=device)\n model_kwargs[\"past_key_values\"] = self._update_cache(past,\n kv_idx,\n prefix_and_next_count=prefix_plus_count,\n max_match_count=max_match_count,\n max_match_index=max_match_index)\n\n next_token_list = [next_token_list[0:1] + [next_token_list[x + 1] for x in match_idx]]\n next_tokens = torch.tensor(next_token_list, dtype=torch.long, device=input_ids.device)\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_token_list'] = next_token_list\n decoding_kwargs['input_id_list'][0].extend(next_token_list[0])\n decoding_kwargs['dls'].append(len(decoding_ids))\n decoding_kwargs['edls'].append(max_match_count + 1)\n if decoding_kwargs.get('debug_lookahead', False):\n lengths = np.sum(decoding_mask, axis=1) - 1\n l = np.concatenate([lengths[:-1][(lengths[1:] - lengths[:-1]) <= 0], lengths[-1:]], axis=0)\n ls = ','.join(l.astype(np.str_))\n decoding_qids = decoding_kwargs['decoding_qids']\n size_str = ','.join([str(x) for x in sizes])\n print(\n f'decoding_length:{len(decoding_ids)+1} accept_length:{max_match_count+1} '\n f'query:{decoding_qids} source:{size_str} lengths:{ls} index:{max_match_index} '\n f'branch_token:{max_decoding_ids_slice} next_token:{max_next_token_slice}')\n\n return model_kwargs\n\n def _update_cache(self, past_key_values, kv_idx, prefix_and_next_count=None, max_match_count=None,\n max_match_index=None):\n update_past_key_values = []\n for k, v in past_key_values:\n if max_match_index + 1 == max_match_count:\n k = k[:, :, :prefix_and_next_count + max_match_count]\n v = v[:, :, :prefix_and_next_count + max_match_count]\n else:\n k = torch.concat([k[:, :, :prefix_and_next_count], k[:, :, kv_idx]], 2)\n v = torch.concat([v[:, :, :prefix_and_next_count], v[:, :, kv_idx]], 2)\n update_past_key_values.append((k, v))\n return tuple(update_past_key_values)\n\n def lookahead_generation(\n self,\n input_ids: torch.LongTensor,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n max_length: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[Union[int, List[int]]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_scores: Optional[bool] = None,\n return_dict_in_generate: Optional[bool] = None,\n synced_gpus: bool = False,\n streamer: Optional[\"BaseStreamer\"] = None,\n **model_kwargs,\n ) -> Union[GreedySearchOutput, torch.LongTensor]:\n r\"\"\"\n Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be\n used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.\n\n <Tip warning={true}>\n\n In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()\n instead. 
For an overview of generation strategies and code examples, check the [following\n guide](../generation_strategies).\n\n </Tip>\n\n\n Parameters:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n The sequence used as a prompt for the generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]\n used to modify the prediction scores of the language modeling head applied at each generation step.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`]\n used to tell if the generation loop should stop.\n\n max_length (`int`, *optional*, defaults to 20):\n **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated\n tokens. The maximum length of the sequence to be generated.\n pad_token_id (`int`, *optional*):\n The id of the *padding* token.\n eos_token_id (`Union[int, List[int]]`, *optional*):\n The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more details.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more details.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether or not to return the prediction scores. See `scores` under returned tensors for more details.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n synced_gpus (`bool`, *optional*, defaults to `False`):\n Whether to continue running the while loop until max_length (needed for ZeRO stage 3)\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n model_kwargs:\n Additional model specific keyword arguments will be forwarded to the `forward` function of the model.\n If model is an encoder-decoder model the kwargs should include `encoder_outputs`.\n\n Return:\n [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or\n `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a\n [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and\n `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if\n `model.config.is_encoder_decoder=True`.\n\n Examples:\n\n ```python\n >>> from transformers import (\n ... AutoTokenizer,\n ... AutoModelForCausalLM,\n ... LogitsProcessorList,\n ... MinLengthLogitsProcessor,\n ... StoppingCriteriaList,\n ... MaxLengthCriteria,\n ... 
)\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n\n >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token\n >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id\n\n >>> input_prompt = \"It might be possible to\"\n >>> input_ids = tokenizer(input_prompt, return_tensors=\"pt\").input_ids\n\n >>> # instantiate logits processors\n >>> logits_processor = LogitsProcessorList(\n ... [\n ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),\n ... ]\n ... )\n >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])\n\n >>> outputs = model.greedy_search(\n ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria\n ... )\n\n >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)\n [\"It might be possible to get a better understanding of the nature of the problem, but it's not\"]\n ```\"\"\"\n # init values\n\n if not hasattr(self, 'lookahead_cache'):\n self.lookahead_cache = LookaheadCache()\n\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n if max_length is not None:\n warnings.warn(\n \"`max_length` is deprecated in this function, use\"\n \" `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.\",\n UserWarning,\n )\n stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)\n pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n eos_token_id_tensor = torch.tensor(eos_token_id, device=input_ids.device) if eos_token_id is not None else None\n output_scores = output_scores if output_scores is not None else self.generation_config.output_scores\n output_attentions = (\n output_attentions if output_attentions is not None else self.generation_config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states\n )\n return_dict_in_generate = (\n return_dict_in_generate\n if return_dict_in_generate is not None\n else self.generation_config.return_dict_in_generate\n )\n\n # init attention / hidden states / scores tuples\n scores = () if (return_dict_in_generate and output_scores) else None\n decoder_attentions = () if (return_dict_in_generate and output_attentions) else None\n cross_attentions = () if (return_dict_in_generate and output_attentions) else None\n decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None\n\n # if model is an encoder-decoder, retrieve encoder attention weights and hidden states\n if return_dict_in_generate and self.config.is_encoder_decoder:\n encoder_attentions = model_kwargs[\"encoder_outputs\"].get(\"attentions\") if output_attentions else None\n encoder_hidden_states = (\n model_kwargs[\"encoder_outputs\"].get(\"hidden_states\") if output_hidden_states else None\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_kwargs.update({\n 'eos': eos_token_id[0] if eos_token_id is not None else 2,\n 'edls': [],\n 'dls': [],\n 'fts': []\n })\n\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n 
stop_max_length = stopping_criteria.max_length\n decoding_max_length = stop_max_length + decoding_length + 1\n attention_mask = model_kwargs.get('attention_mask', None)\n input_device = input_ids.device\n if attention_mask is None:\n bs = input_ids.size(0)\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long, device=input_device),\n 0)\n elif len(attention_mask.shape) == 2:\n # from [bs, src_len] to [bs,1,max_len,max_len]\n bs, src_len = attention_mask.shape\n pad_len = decoding_max_length - src_len\n attention_mask = attention_mask.long()\n if pad_len > 0:\n pad_mask = torch.ones((bs, pad_len), dtype=torch.long, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, pad_mask], 1)\n full_attention_mask = torch.tril(attention_mask[:, None, None].expand(-1, -1, decoding_max_length, -1), 0)\n elif len(attention_mask.shape) == 4:\n bs, _, src_len, tgt_len = attention_mask.shape\n attention_mask = attention_mask.long()\n if src_len < decoding_max_length or tgt_len < decoding_max_length:\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long,\n device=input_device),\n 0)\n full_attention_mask[:, :, :src_len, :tgt_len] = attention_mask\n else:\n full_attention_mask = attention_mask\n else:\n raise ValueError(f'unsupport attention_mask.shape:{attention_mask.shape}')\n model_kwargs['attention_mask'] = full_attention_mask\n decoding_kwargs['max_length'] = stop_max_length\n decoding_kwargs['decoding_max_length'] = decoding_max_length\n\n # keep track of which sequences are already finished\n unfinished_sequences = torch.ones(input_ids.shape[0], dtype=torch.long, device=input_ids.device)\n\n assert input_ids.size(0) == 1\n input_id_list = input_ids[0].tolist()\n decoding_kwargs['input_id_list'] = [input_id_list]\n branch_length = decoding_kwargs.get('branch_length', 12)\n self.lookahead_cache.put(input_id_list[1:], branch_length=branch_length + 1, mode='input', idx=0)\n ts = time.time()\n\n this_peer_finished = False # used by synced_gpus only\n while True:\n if synced_gpus:\n # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.\n # The following logic allows an early break if all peers finished generating their sequence\n this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(input_ids.device)\n # send 0.0 if we finished, 1.0 otherwise\n dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)\n # did all peers finish? 
the reduced sum will be 0.0 then\n if this_peer_finished_flag.item() == 0.0:\n break\n\n # prepare model inputs\n model_inputs = self.lookahead_prepare_inputs_for_generation(input_ids, **model_kwargs)\n decoding_kwargs = model_inputs.pop('decoding_kwargs', {})\n\n # forward pass to get next token\n outputs = self(\n **model_inputs,\n return_dict=True,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n )\n\n if synced_gpus and this_peer_finished:\n continue # don't waste resources running the code we don't need\n\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n model_kwargs = self._lookahead_update_model_kwargs_for_generation(\n outputs,\n model_kwargs,\n is_encoder_decoder=self.config.is_encoder_decoder,\n input_ids=input_ids,\n logits_processor=logits_processor\n )\n\n next_tokens = model_kwargs['next_tokens']\n next_tokens_scores = model_kwargs['next_tokens_scores']\n next_token_list = model_kwargs['next_token_list']\n\n # finished sentences should have their next token be a padding token\n if eos_token_id is not None:\n if pad_token_id is None:\n raise ValueError(\"If `eos_token_id` is defined, make sure that `pad_token_id` is defined.\")\n next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)\n\n # update generated ids, model inputs, and length for next step\n input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n if streamer is not None:\n streamer.put(next_token_list)\n\n self.lookahead_cache.stream_put(next_token_list[0], branch_length=branch_length + 1, final=False,\n mode='output', idx=0)\n\n # Store scores, attentions and hidden_states when required\n if return_dict_in_generate:\n if output_scores:\n scores += (next_tokens_scores,)\n if output_attentions:\n decoder_attentions += (\n (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)\n )\n if self.config.is_encoder_decoder:\n cross_attentions += (outputs.cross_attentions,)\n\n if output_hidden_states:\n decoder_hidden_states += (\n (outputs.decoder_hidden_states,)\n if self.config.is_encoder_decoder\n else (outputs.hidden_states,)\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n if eos_token_id_tensor is not None:\n # unfinished_sequences = unfinished_sequences.mul(\n # next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n # )\n unfinished_sequences = unfinished_sequences.mul(\n next_tokens[:, :, None].ne(eos_token_id_tensor).prod(dim=2).prod(dim=1))\n\n # stop when each sentence is finished\n if unfinished_sequences.max() == 0:\n this_peer_finished = True\n\n # stop if we exceed the maximum length\n if stopping_criteria(input_ids, scores):\n this_peer_finished = True\n\n te = time.time()\n model_kwargs['decoding_kwargs']['fts'].append(te - ts)\n ts = te\n if this_peer_finished and not synced_gpus:\n self.lookahead_cache.stream_put([], branch_length=branch_length + 1, final=True,\n mode='output', idx=0)\n break\n\n if streamer is not None:\n streamer.end()\n\n if return_dict_in_generate:\n if self.config.is_encoder_decoder:\n return GreedySearchEncoderDecoderOutput(\n sequences=input_ids,\n scores=scores,\n encoder_attentions=encoder_attentions,\n encoder_hidden_states=encoder_hidden_states,\n decoder_attentions=decoder_attentions,\n cross_attentions=cross_attentions,\n decoder_hidden_states=decoder_hidden_states,\n )\n else:\n kwargs = {'dls': model_kwargs['decoding_kwargs']['dls'],\n 'edls': model_kwargs['decoding_kwargs']['edls'],\n 
'fts': model_kwargs['decoding_kwargs']['fts']}\n return LookaheadDecoderOnlyOutput(\n sequences=input_ids,\n scores=scores,\n attentions=decoder_attentions,\n hidden_states=decoder_hidden_states,\n kwargs=kwargs\n )\n else:\n return input_ids\n\n def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):\n \"\"\"Validates model kwargs for generation. Generate argument typos will also be caught here.\"\"\"\n # Excludes arguments that are handled before calling any model function\n if self.config.is_encoder_decoder:\n for key in [\"decoder_input_ids\"]:\n model_kwargs.pop(key, None)\n\n unused_model_args = []\n model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)\n # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If\n # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)\n if \"kwargs\" in model_args or \"model_kwargs\" in model_args:\n model_args |= set(inspect.signature(self.forward).parameters)\n\n # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`\n if self.config.is_encoder_decoder:\n base_model = getattr(self, self.base_model_prefix, None)\n\n # allow encoder kwargs\n encoder = getattr(self, \"encoder\", None)\n # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.\n # Also, it has `base_model_prefix = \"encoder_decoder\"` but there is no `self.encoder_decoder`\n # TODO: A better way to handle this.\n if encoder is None and base_model is not None:\n encoder = getattr(base_model, \"encoder\", None)\n\n if encoder is not None:\n encoder_model_args = set(inspect.signature(encoder.forward).parameters)\n model_args |= encoder_model_args\n\n # allow decoder kwargs\n decoder = getattr(self, \"decoder\", None)\n if decoder is None and base_model is not None:\n decoder = getattr(base_model, \"decoder\", None)\n\n if decoder is not None:\n decoder_model_args = set(inspect.signature(decoder.forward).parameters)\n model_args |= {f\"decoder_{x}\" for x in decoder_model_args}\n\n decoding_kwargs = ['decoding_kwargs','stop_words_ids']\n for key, value in model_kwargs.items():\n if value is not None and key not in model_args and key not in decoding_kwargs:\n unused_model_args.append(key)\n\n if unused_model_args:\n raise ValueError(\n f\"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the\"\n \" generate arguments will also show up in this list)\"\n )" }, { "identifier": "QWenConfig", "path": "pia/lookahead/models/qwen/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_size=4096,\n num_hidden_layers=32,\n num_attention_heads=32,\n emb_dropout_prob=0.0,\n attn_dropout_prob=0.0,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n max_position_embeddings=8192,\n scale_attn_weights=True,\n use_cache=True,\n bf16=False,\n fp16=False,\n fp32=False,\n kv_channels=128,\n rotary_pct=1.0,\n rotary_emb_base=10000,\n use_dynamic_ntk=True,\n use_logn_attn=True,\n use_flash_attn=\"auto\",\n intermediate_size=22016,\n no_bias=True,\n tie_word_embeddings=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.emb_dropout_prob = emb_dropout_prob\n self.attn_dropout_prob = 
attn_dropout_prob\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.max_position_embeddings = max_position_embeddings\n self.bf16 = bf16\n self.fp16 = fp16\n self.fp32 = fp32\n self.kv_channels = kv_channels\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.use_dynamic_ntk = use_dynamic_ntk\n self.use_logn_attn = use_logn_attn\n self.use_flash_attn = use_flash_attn\n self.no_bias = no_bias\n super().__init__(\n tie_word_embeddings=tie_word_embeddings,\n **kwargs\n )" }, { "identifier": "HistoryType", "path": "pia/lookahead/models/qwen/qwen_generation_utils.py", "snippet": "def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:\ndef get_ltor_masks_and_position_ids(\n data,\n eod_token,\n reset_position_ids,\n reset_attention_mask,\n eod_mask_loss,\n):\ndef get_batch(context_tokens: torch.LongTensor, eod_id: int):\ndef get_stop_words_ids(chat_format, tokenizer):\ndef make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n def _tokenize_str(role, content):\ndef _decode_default(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_words: List[str],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace',\n):\ndef _decode_chatml(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_token_ids: List[int],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace'\n):\ndef decode_tokens(\n tokens: Union[torch.LongTensor, TokensType],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n chat_format: str,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = \"replace\",\n) -> str:\n def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor\n ) -> torch.FloatTensor:\n def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:\n def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:\ndef top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float(\"Inf\")):\ndef switch(val1, val2, boolean):\nclass StopWordsLogitsProcessor(LogitsProcessor):" } ]
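The `_lookahead_update_model_kwargs_for_generation` snippet above accepts the longest draft branch that the model's own argmax predictions reproduce. A self-contained sketch of just that verification rule on toy inputs (`longest_verified_branch` and the linear-chain mask are illustrative stand-ins, not part of the repository's API):

```python
import numpy as np

def longest_verified_branch(decoding_ids, decoding_mask, next_token_list):
    """Toy version of the branch-verification rule in
    _lookahead_update_model_kwargs_for_generation.

    decoding_ids:    draft tokens after the trigger token (length n)
    decoding_mask:   (n+1, n+1) 0/1 matrix; row i+1 marks which draft
                     positions lie on the same trie branch as draft i
    next_token_list: model argmax per fed position (length n+1); index 0
                     is the prediction that follows the confirmed prefix
    """
    best_index, best_count = 0, 0
    for i in range(len(decoding_ids)):
        branch = np.nonzero(decoding_mask[i + 1, 1:])[0]
        drafted = [decoding_ids[j] for j in branch]
        predicted = [next_token_list[0]] + [next_token_list[j + 1] for j in branch]
        # count how many draft tokens the model itself would have emitted
        matched = len(drafted)
        for j, tok in enumerate(drafted):
            if predicted[j] != tok:
                matched = j
                break
        if matched > best_count:
            best_count, best_index = matched, i
    return best_index, best_count

# One linear branch [5, 7, 9]: the model confirms 5 and 7, then predicts 8.
mask = np.tril(np.ones((4, 4), dtype=np.int64))
print(longest_verified_branch([5, 7, 9], mask, [5, 7, 8, 1]))  # (1, 2)
```

Per step, `matched + 1` tokens are committed (the verified drafts plus the model's own next token), which is where the speedup of lookahead decoding comes from.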
import importlib
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import flash_attn
from typing import TYPE_CHECKING, Optional, Tuple, Union, List, Any, Generator
from torch.cuda.amp import autocast
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer, GenerationConfig
from transformers.generation.logits_process import LogitsProcessorList
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from pia.lookahead.common.pretrained_model import LookaheadPreTrainedModel
from transformers.utils import logging
from einops import rearrange
from torch import nn
from .configuration_qwen import QWenConfig
from .qwen_generation_utils import (
    HistoryType,
    make_context,
    decode_tokens,
    get_stop_words_ids,
    StopWordsLogitsProcessor,
)
from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
from flash_attn.ops.rms_norm import rms_norm as __rms_norm
# flash-attn >= 2 renamed flash_attn_unpadded_func to flash_attn_varlen_func;
# _import_flash_attn() below resolves the correct symbol for the installed version.
from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
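The flash-attn imports above were flattened out of version-guarded `try` blocks, which is why the same alias originally appeared three times; `_import_flash_attn()` in the model code below performs the real runtime resolution. A minimal sketch of that resolve-or-fall-back pattern, assuming only flash-attn's public v1/v2 names (the helper `resolve_varlen_attn` is hypothetical):

```python
from typing import Callable, Optional

def resolve_varlen_attn() -> Optional[Callable]:
    """Return the variable-length attention kernel, or None to signal that
    the caller should fall back to the eager PyTorch attention path."""
    try:
        import flash_attn
    except ImportError:
        return None
    version = getattr(flash_attn, "__version__", "1.0")
    if int(version.split(".")[0]) >= 2:
        # renamed in flash-attn v2
        from flash_attn.flash_attn_interface import flash_attn_varlen_func
        return flash_attn_varlen_func
    from flash_attn.flash_attn_interface import flash_attn_unpadded_func
    return flash_attn_unpadded_func
```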
17805
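The `forward` in the cropped code below computes the causal LM loss by shifting: the logit at position t is scored against the label at position t+1, so the last logit and the first label are dropped. A toy-shape sketch of exactly that step:

```python
import torch
from torch.nn import CrossEntropyLoss

# Toy shapes: batch=2, seq=5, vocab=11.
lm_logits = torch.randn(2, 5, 11)
labels = torch.randint(0, 11, (2, 5))

shift_logits = lm_logits[..., :-1, :].contiguous()  # (2, 4, 11)
shift_labels = labels[..., 1:].contiguous()         # (2, 4)
loss = CrossEntropyLoss()(
    shift_logits.view(-1, shift_logits.size(-1)),   # (8, 11)
    shift_labels.view(-1),                          # (8,)
)
print(loss.item())
```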
position_ids = position_ids[:, -1].unsqueeze(-1) else: position_ids = None if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "position_ids": position_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } ) return model_inputs def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache( past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: return tuple( tuple( past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past ) for layer_past in past_key_values ) def chat( self, tokenizer: PreTrainedTokenizer, query: str, history: Optional[HistoryType], system: str = "You are a helpful assistant.", append_history: bool = True, stream: Optional[bool] = _SENTINEL, stop_words_ids: Optional[List[List[int]]] = None, generation_config: Optional[GenerationConfig] = None, **kwargs, ) -> Tuple[str, HistoryType]: generation_config = generation_config if generation_config is not None else self.generation_config assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT if history is None: history = [] if stop_words_ids is None: stop_words_ids = [] max_window_size = kwargs.get('max_window_size', None) if max_window_size is None: max_window_size = generation_config.max_window_size
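`lookahead_generation` (in the snippet earlier in this record) pre-builds a binary causal mask of shape [bs, 1, L, L], and `QWenModel.forward` in the code below recovers position ids from that mask as per-row sums minus one before turning it into an additive attention bias. A compact sketch of both steps with toy sizes:

```python
import torch

bs, L = 1, 6
dtype = torch.float32

# binary causal mask of shape [bs, 1, L, L], as built in lookahead_generation
full_mask = torch.tril(torch.ones((bs, 1, L, L), dtype=torch.long))

# position id of each query row = number of visible keys - 1 (QWenModel.forward)
position_ids = torch.sum(full_mask, dim=-1).squeeze(1) - 1
print(position_ids)  # tensor([[0, 1, 2, 3, 4, 5]])

# additive form: 0.0 where attending, dtype-min where masked
additive = (1.0 - full_mask.to(dtype)) * torch.finfo(dtype).min
print(additive[0, 0, 0])  # first query attends only to key 0
```

Deriving position ids from the mask is what lets the drafted trie branches share one forward pass: every branch token gets the position it would have if its branch alone were appended to the prefix.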
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


if TYPE_CHECKING:
    pass
    # from transformers.modeling_utils import PreTrainedModel

try:
    from einops import rearrange
except ImportError:
    rearrange = None

SUPPORT_CUDA = torch.cuda.is_available()
SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "qwen"
_CONFIG_FOR_DOC = "QWenConfig"

QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]

_ERROR_BAD_CHAT_FORMAT = """\
We detect you are probably using the pretrained model (rather than the chat model) for chatting, since the chat_format in generation_config is not "chatml".
If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
"""

_SENTINEL = object()
_ERROR_STREAM_IN_CHAT = """\
Passing the argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
"""

_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\
We detect you have activated flash attention support but are running model computation on CPU. Please make sure that your input data has been placed on the GPU. If you actually want to run CPU computation, please follow the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained). 
检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。
"""

apply_rotary_emb_func = None
rms_norm = None
flash_attn_unpadded_func = None


def _import_flash_attn():
    global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func
    try:
        from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
        apply_rotary_emb_func = __apply_rotary_emb_func
    except ImportError:
        logger.warn(
            "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
            "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
        )

    try:
        from flash_attn.ops.rms_norm import rms_norm as __rms_norm
        rms_norm = __rms_norm
    except ImportError:
        logger.warn(
            "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
            "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
        )

    try:
        import flash_attn
        if not hasattr(flash_attn, '__version__'):
            from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
        else:
            if int(flash_attn.__version__.split(".")[0]) >= 2:
                from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
            else:
                from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
        flash_attn_unpadded_func = __flash_attn_unpadded_func
    except ImportError:
        logger.warn(
            "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
            "https://github.com/Dao-AILab/flash-attention"
        )


class FlashSelfAttention(torch.nn.Module):
    def __init__(
            self,
            causal=False,
            softmax_scale=None,
            attention_dropout=0.0,
    ):
        super().__init__()
        assert flash_attn_unpadded_func is not None, (
            "Please install FlashAttention first, "
            "e.g., with pip install flash-attn"
        )
        assert (
            rearrange is not None
        ), "Please install einops first, e.g., with pip install einops"
        self.causal = causal
        self.softmax_scale = softmax_scale
        self.dropout_p = attention_dropout

    def forward(self, q, k, v):
        assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
        assert all((i.is_cuda for i in (q, k, v)))
        batch_size, seqlen_q = q.shape[0], q.shape[1]
        seqlen_k = k.shape[1]
        q, k, v = [rearrange(x, "b s ... 
-> (b s) ...") for x in [q, k, v]] cu_seqlens_q = torch.arange( 0, (batch_size + 1) * seqlen_q, step=seqlen_q, dtype=torch.int32, device=q.device, ) if self.training: assert seqlen_k == seqlen_q is_causal = self.causal cu_seqlens_k = cu_seqlens_q else: is_causal = seqlen_q == seqlen_k cu_seqlens_k = torch.arange( 0, (batch_size + 1) * seqlen_k, step=seqlen_k, dtype=torch.int32, device=q.device, ) self.dropout_p = 0 output = flash_attn_unpadded_func( q, k, v, cu_seqlens_q, cu_seqlens_k, seqlen_q, seqlen_k, self.dropout_p, softmax_scale=self.softmax_scale, causal=is_causal, ) new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:] output = output.view(new_shape) return output class QWenAttention(nn.Module): def __init__(self, config): super().__init__() self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) self.seq_length = config.seq_length self.hidden_size = config.hidden_size self.split_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads # self.use_flash_attn = config.use_flash_attn self.use_flash_attn = False self.scale_attn_weights = True self.projection_size = config.kv_channels * config.num_attention_heads assert self.projection_size % config.num_attention_heads == 0 self.hidden_size_per_attention_head = ( self.projection_size // config.num_attention_heads ) self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size) self.c_proj = nn.Linear( config.hidden_size, self.projection_size, bias=not config.no_bias ) self.is_fp32 = not (config.bf16 or config.fp16) if ( self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 ): self.core_attention_flash = FlashSelfAttention( causal=True, attention_dropout=config.attn_dropout_prob ) self.bf16 = config.bf16 self.use_dynamic_ntk = config.use_dynamic_ntk self.use_logn_attn = config.use_logn_attn logn_list = [ math.log(i, self.seq_length) if i > self.seq_length else 1 for i in range(1, 32768) ] self.logn_tensor = torch.tensor(logn_list)[None, :, None, None] self.attn_dropout = nn.Dropout(config.attn_dropout_prob) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) query_length, key_length = query.size(-2), key.size(-2) causal_mask = registered_causal_mask[ :, :, key_length - query_length: key_length, :key_length ] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to( attn_weights.device ) attn_weights = torch.where( causal_mask, attn_weights.to(attn_weights.dtype), mask_value ) attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def _upcast_and_reordered_attn( self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None ): bsz, num_heads, q_seq_len, dk = query.size() _, _, k_seq_len, _ = key.size() attn_weights = torch.empty( bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device, ) scale_factor = 1.0 if self.scale_attn_weights: scale_factor /= 
float(value.size(-1)) ** 0.5 with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape( -1, dk, k_seq_len ) attn_weights = torch.baddbmm( attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor ) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) query_length, key_length = query.size(-2), key.size(-2) causal_mask = registered_causal_mask[ :, :, key_length - query_length: key_length, :key_length ] mask_value = torch.finfo(attn_weights.dtype).min mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to( attn_weights.device ) attn_weights = torch.where(causal_mask, attn_weights, mask_value) if attention_mask is not None: attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) if attn_weights.dtype != torch.float32: raise RuntimeError( "Error with upcasting, attn_weights does not have dtype torch.float32" ) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) return attn_output, attn_weights def _split_heads(self, tensor, num_heads, attn_head_size): new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) tensor = tensor.view(new_shape) return tensor def _merge_heads(self, tensor, num_heads, attn_head_size): tensor = tensor.contiguous() new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) return tensor.view(new_shape) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) if ( self.use_flash_attn and flash_attn_unpadded_func is not None and not self.is_fp32 and query.is_cuda ): q, k, v = query, key, value context_layer = self.core_attention_flash(q, k, v) # b s h d -> b s (h d) context_layer = context_layer.flatten(2, 3).contiguous() else: query = 
query.permute(0, 2, 1, 3)
        key = key.permute(0, 2, 1, 3)
        value = value.permute(0, 2, 1, 3)

        if (
            registered_causal_mask is None
            and self.use_flash_attn
            and flash_attn_unpadded_func is not None
            and not self.is_fp32
            and not query.is_cuda
        ):
            raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)

        attn_output, attn_weight = self._attn(
            query, key, value, registered_causal_mask, attention_mask, head_mask
        )
        context_layer = self._merge_heads(
            attn_output, self.num_heads, self.head_dim
        )

        attn_output = self.c_proj(context_layer)

        outputs = (attn_output, present)
        if output_attentions:
            if (
                self.use_flash_attn
                and flash_attn_unpadded_func is not None
                and not self.is_fp32
            ):
                raise ValueError("Cannot output attentions while using flash-attn")
            else:
                outputs += (attn_weight,)

        return outputs


class QWenMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.w1 = nn.Linear(
            config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
        )
        self.w2 = nn.Linear(
            config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
        )
        ff_dim_in = config.intermediate_size // 2
        self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)

    def forward(self, hidden_states):
        a1 = self.w1(hidden_states)
        a2 = self.w2(hidden_states)
        intermediate_parallel = a1 * F.silu(a2)
        output = self.c_proj(intermediate_parallel)
        return output


class QWenBlock(nn.Module):
    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        self.bf16 = config.bf16

        self.ln_1 = RMSNorm(
            hidden_size,
            eps=config.layer_norm_epsilon,
        )
        self.attn = QWenAttention(config)
        self.ln_2 = RMSNorm(
            hidden_size,
            eps=config.layer_norm_epsilon,
        )

        self.mlp = QWenMLP(config)

    def forward(
        self,
        hidden_states: Optional[Tuple[torch.FloatTensor]],
        rotary_pos_emb: Optional[List[torch.Tensor]] = None,
        registered_causal_mask: Optional[torch.Tensor] = None,
        layer_past: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
    ):
        layernorm_output = self.ln_1(hidden_states)

        attn_outputs = self.attn(
            layernorm_output,
            rotary_pos_emb,
            registered_causal_mask=registered_causal_mask,
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
        )
        attn_output = attn_outputs[0]

        outputs = attn_outputs[1:]

        residual = hidden_states
        layernorm_input = attn_output + residual

        layernorm_output = self.ln_2(layernorm_input)

        residual = layernorm_input
        mlp_output = self.mlp(layernorm_output)
        hidden_states = residual + mlp_output

        if use_cache:
            outputs = (hidden_states,) + outputs
        else:
            outputs = (hidden_states,) + outputs[1:]

        return outputs


class QWenPreTrainedModel(LookaheadPreTrainedModel):
    config_class = QWenConfig
    base_model_prefix = "transformer"
    is_parallelizable = False
    supports_gradient_checkpointing = True
    _no_split_modules = ["QWenBlock"]

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, RMSNorm):
            module.weight.data.fill_(1.0)

        for name, p in module.named_parameters():
            if name == "c_proj.weight":
                p.data.normal_(
                    mean=0.0,
                    std=(
                        self.config.initializer_range
                        / math.sqrt(2 * self.config.num_hidden_layers)
                    ),
                )

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, QWenModel):
            module.gradient_checkpointing = value


class QWenModel(QWenPreTrainedModel):
    _keys_to_ignore_on_load_missing = ["attn.masked_bias"]

    def __init__(self, config):
        super().__init__(config)
        self.vocab_size = config.vocab_size
        self.num_hidden_layers = config.num_hidden_layers
        self.embed_dim = config.hidden_size

        self.gradient_checkpointing = False
        self.use_dynamic_ntk = config.use_dynamic_ntk
        self.seq_length = config.seq_length

        self.wte = nn.Embedding(self.vocab_size, self.embed_dim)

        self.drop = nn.Dropout(config.emb_dropout_prob)

        if config.rotary_pct == 1.0:
            self.rotary_ndims = None
        else:
            assert config.rotary_pct < 1
            self.rotary_ndims = int(
                config.kv_channels * config.rotary_pct
            )
        dim = (
            self.rotary_ndims
            if self.rotary_ndims is not None
            else config.kv_channels
        )
        self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)

        # self.use_flash_attn = config.use_flash_attn
        self.use_flash_attn = False
        self.is_fp32 = not (config.bf16 or config.fp16)
        if (
            self.use_flash_attn
            and flash_attn_unpadded_func is not None
            and not self.is_fp32
        ):
            self.registered_causal_mask = None
        else:
            max_positions = config.max_position_embeddings
            self.register_buffer(
                "registered_causal_mask",
                torch.tril(
                    torch.ones((max_positions, max_positions), dtype=torch.bool)
                ).view(1, 1, max_positions, max_positions),
                persistent=False,
            )

        self.h = nn.ModuleList(
            [
                QWenBlock(
                    config
                )
                for i in range(config.num_hidden_layers)
            ]
        )
        self.ln_f = RMSNorm(
            self.embed_dim,
            eps=config.layer_norm_epsilon,
        )

        self.post_init()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both input_ids and inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            # input_ids = input_ids.view(-1, input_shape[-1])
            batch_size, seq_length = input_ids.shape
            # batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past_key_values is None:
            past_length = 0
            past_key_values = tuple([None] * len(self.h))
        else:
            past_length = past_key_values[0][0].size(-2)

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)

        # NOTE: adapt for lookahead
        if attention_mask is not None and len(attention_mask.shape) == 4:
            # with lookahead
            position_ids = torch.sum(attention_mask, dim=-1).squeeze(1) - 1
            attention_mask = (1.0 - attention_mask.to(inputs_embeds.dtype)) * torch.finfo(inputs_embeds.dtype).min
        else:
            # without lookahead
            if position_ids is None:
                position_ids = torch.arange(
                    past_length,
                    input_shape[-1] + past_length,
                    dtype=torch.long,
                    device=device,
                )
                position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])

            if attention_mask is not None:
                if batch_size <= 0:
                    raise ValueError("batch_size has to be defined and > 0")
                attention_mask = attention_mask.view(batch_size, -1)
                attention_mask = attention_mask[:, None, None, :]
                attention_mask = attention_mask.to(dtype=self.dtype)
                attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        encoder_attention_mask = None
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # if inputs_embeds is None:
        #     inputs_embeds = self.wte(input_ids)
        hidden_states = inputs_embeds

        kv_seq_len = hidden_states.size()[1]
        if past_key_values[0] is not None:
            # past key values[0][0] shape: bs * seq_len * head_num * dim
            kv_seq_len += past_key_values[0][0].shape[1]
        if (
            self.use_dynamic_ntk
            and kv_seq_len == hidden_states.size()[1]
            and not self.training
        ):
            context_value = math.log(kv_seq_len / self.seq_length, 2) + 1
            ntk_alpha = 2 ** math.ceil(context_value) - 1
            ntk_alpha = max(ntk_alpha, 1)
        else:
            ntk_alpha = self.rotary_emb._ntk_alpha_cached

        rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha)
        for idx in range(len(rotary_pos_emb)):
            rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device)

        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        # None for past_key_value
                        return module(*inputs, use_cache, output_attentions)

                    return custom_forward

                outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(block),
                    hidden_states,
                    rotary_pos_emb,
                    self.registered_causal_mask,
                    None,
                    attention_mask,
                    head_mask[i],
                    encoder_hidden_states,
                    encoder_attention_mask,
                )
            else:
                outputs = block(
                    hidden_states,
                    layer_past=layer_past,
                    rotary_pos_emb=rotary_pos_emb,
                    registered_causal_mask=self.registered_causal_mask,
                    attention_mask=attention_mask,
                    head_mask=head_mask[i],
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    use_cache=use_cache,
                    output_attentions=output_attentions,
                )

            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)

            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)

        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(output_shape)
        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v for v in [hidden_states, presents, all_hidden_states] if v is not None
            )

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class QWenLMHeadModel(QWenPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]

    def __init__(self, config):
        super().__init__(config)
        assert (
            config.bf16 + config.fp16 + config.fp32 <= 1
        ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"

        autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0

        if autoset_precision:
            if SUPPORT_BF16:
                logger.warn(
                    "The model is automatically converting to bf16 for faster inference. "
                    "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
                )
                config.bf16 = True
            elif SUPPORT_FP16:
                logger.warn(
                    "The model is automatically converting to fp16 for faster inference. "
                    "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
                )
                config.fp16 = True
            else:
                config.fp32 = True

        if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
            logger.warn("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
        if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
            logger.warn("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
        if config.fp32:
            if SUPPORT_BF16:
                logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
            elif SUPPORT_FP16:
                logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")

        if config.use_flash_attn == "auto":
            if config.bf16 or config.fp16:
                logger.warn("Try importing flash-attention for faster inference...")
                config.use_flash_attn = True
            else:
                config.use_flash_attn = False
        if config.use_flash_attn and config.fp32:
            logger.warn("Flash attention will be disabled because it does NOT support fp32.")

        if config.use_flash_attn:
            _import_flash_attn()

        self.transformer = QWenModel(config)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        if config.bf16:
            self.transformer.bfloat16()
            self.lm_head.bfloat16()
        if config.fp16:
            self.transformer.half()
            self.lm_head.half()
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def prepare_inputs_for_generation(
        self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
    ):
        token_type_ids = kwargs.get("token_type_ids", None)
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask", None)
        position_ids = kwargs.get("position_ids", None)

        if attention_mask is not None and position_ids is None:
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)
            if past_key_values:
                position_ids = position_ids[:, -1].unsqueeze(-1)
        else:
            position_ids = None

        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {"inputs_embeds": inputs_embeds}
        else:
            model_inputs = {"input_ids": input_ids}

        model_inputs.update(
            {
                "past_key_values": past_key_values,
                "use_cache": kwargs.get("use_cache"),
                "position_ids": position_ids,
                "attention_mask": attention_mask,
                "token_type_ids": token_type_ids,
            }
        )
        return model_inputs

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            labels = labels.to(lm_logits.device)
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
            )

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
    ) -> Tuple[Tuple[torch.Tensor]]:
        return tuple(
            tuple(
                past_state.index_select(0, beam_idx.to(past_state.device))
                for past_state in layer_past
            )
            for layer_past in past_key_values
        )

    def chat(
        self,
        tokenizer: PreTrainedTokenizer,
        query: str,
        history: Optional[HistoryType],
        system: str = "You are a helpful assistant.",
        append_history: bool = True,
        stream: Optional[bool] = _SENTINEL,
        stop_words_ids: Optional[List[List[int]]] = None,
        generation_config: Optional[GenerationConfig] = None,
        **kwargs,
    ) -> Tuple[str, HistoryType]:
        generation_config = generation_config if generation_config is not None else self.generation_config

        assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
        assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
        if history is None:
            history = []
        if stop_words_ids is None:
            stop_words_ids = []

        max_window_size = kwargs.get('max_window_size', None)
        if max_window_size is None:
            max_window_size = generation_config.max_window_size
raw_text, context_tokens = make_context(
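
A note for readers of this record: QWenMLP above is a SwiGLU-style gated feed-forward. The following is a minimal, self-contained sketch of the same computation; the class name and the sizes in the usage lines are illustrative and not taken from any real QWen config.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SwiGLUMLP(nn.Module):
    # Gated feed-forward: out = c_proj(w1(x) * SiLU(w2(x))), as in QWenMLP above.
    def __init__(self, hidden_size: int, intermediate_size: int, bias: bool = False):
        super().__init__()
        half = intermediate_size // 2  # QWen halves the FFN width for the two branches
        self.w1 = nn.Linear(hidden_size, half, bias=bias)      # value branch
        self.w2 = nn.Linear(hidden_size, half, bias=bias)      # gate branch (SiLU)
        self.c_proj = nn.Linear(half, hidden_size, bias=bias)  # back to model width

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.c_proj(self.w1(x) * F.silu(self.w2(x)))

mlp = SwiGLUMLP(hidden_size=64, intermediate_size=256)
y = mlp(torch.randn(2, 8, 64))  # (batch, seq, hidden)
assert y.shape == (2, 8, 64)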
2
2023-12-19 13:11:38+00:00
24k
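
Before moving to the next record, a short sketch of the label shifting that QWenLMHeadModel.forward above performs for its causal-LM loss: position t is scored against token t+1, so the last logit and the first label are dropped. All tensors here are toy stand-ins with illustrative shapes.

import torch
from torch.nn import CrossEntropyLoss

batch, seq, vocab = 2, 8, 100
lm_logits = torch.randn(batch, seq, vocab)      # stand-in for the lm_head output
labels = torch.randint(0, vocab, (batch, seq))

# Drop the final logit (nothing follows it) and the first label
# (nothing predicts it), then flatten for cross-entropy.
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab), shift_labels.view(-1))
print(loss.item())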
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
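
The context list for this record (below) includes the sinusoidal timestep_embedding helper from ldm/modules/diffusionmodules/util.py. For quick reference, here is a self-contained copy of its non-repeat_only path with a usage line; the batch of timesteps is illustrative.

import math
import torch

def timestep_embedding(timesteps: torch.Tensor, dim: int, max_period: int = 10000) -> torch.Tensor:
    # Frequencies decay geometrically from 1 toward 1/max_period.
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(0, half, dtype=torch.float32) / half
    ).to(device=timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:  # pad one zero column when dim is odd
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    return embedding

t = torch.tensor([0, 10, 500])        # one diffusion timestep per batch element
emb = timestep_embedding(t, dim=128)  # -> torch.Size([3, 128])
assert emb.shape == (3, 128)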
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "SpatialTransformerPlus", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformerPlus(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True, use_temporal_attention=False):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n self.spatial_attn = SpatialSelfAttention(in_channels)\n if use_temporal_attention:\n self.temporal_attn = TemporalTransformer(in_channels)\n\n def forward(self, x, context=None, ref=None):\n x = torch.cat([x, ref], dim=-1)\n x = self.spatial_attn(x)\n x = x[..., :ref.shape[-1]]\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "def convert_module_to_f16(x):\ndef convert_module_to_f32(x):\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n def forward(self, x):\n def forward(self, x, emb):\n def forward(self, x, emb, context=None):\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):\n def forward(self, x):\n def __init__(self, channels, out_channels=None, ks=5):\n def forward(self,x):\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n def forward(self, x):\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, 
x, emb):\n def _forward(self, x, emb):\n def __init__(\n self,\n channels,\n dropout,\n out_channels=None,\n use_conv=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, x):\n def _forward(self, x):\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n def forward(self, x):\n def _forward(self, x):\ndef count_flops_attn(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n def convert_to_fp16(self):\n def convert_to_fp32(self):\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\nclass AttentionPool2d(nn.Module):\nclass TimestepBlock(nn.Module):\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\nclass Upsample(nn.Module):\nclass TransposedUpsample(nn.Module):\nclass Downsample(nn.Module):\nclass ResBlock(TimestepBlock):\nclass ResBlockNoTime(TimestepBlock):\nclass AttentionBlock(nn.Module):\nclass QKVAttentionLegacy(nn.Module):\nclass QKVAttention(nn.Module):\nclass UNetModel(nn.Module):" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = 
scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. 
/ z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n 
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and 
not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']:\n xc = batch[cond_key]\n xc = rearrange(xc, 'b h w c -> b c h w')\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n \n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support 
dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return 
self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = 
self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = {\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. 
* (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index 
= total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / 
a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import einops
import torch
import torch as th
import torch.nn as nn
from ldm.modules.diffusionmodules.util import (
    conv_nd,
    linear,
    zero_module,
    timestep_embedding,
)
from einops import rearrange, repeat
from torchvision.utils import make_grid
from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus
from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import log_txt_as_img, exists, instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
from omegaconf.listconfig import ListConfig
18,001
class ReferenceNet(nn.Module):
    """
    The full UNet model with attention and timestep embedding.
    :param in_channels: channels in the input Tensor.
    :param model_channels: base channel count for the model.
    :param out_channels: channels in the output Tensor.
    :param num_res_blocks: number of residual blocks per downsample.
    :param attention_resolutions: a collection of downsample rates at which
        attention will take place. May be a set, list, or tuple.
        For example, if this contains 4, then at 4x downsampling, attention
        will be used.
    :param dropout: the dropout probability.
    :param channel_mult: channel multiplier for each level of the UNet.
    :param conv_resample: if True, use learned convolutions for upsampling and
        downsampling.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param num_classes: if specified (as an int), then this model will be
        class-conditional with `num_classes` classes.
    :param use_checkpoint: use gradient checkpointing to reduce memory usage.
    :param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
        a fixed channel width per attention head.
    :param num_heads_upsample: works with num_heads to set a different number
        of heads for upsampling. Deprecated.
    :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
    :param resblock_updown: use residual blocks for up/downsampling.
    :param use_new_attention_order: use a different attention pattern for
        potentially increased efficiency.
    """

    def __init__(
            self,
            image_size,
            in_channels,
            model_channels,
            num_res_blocks,
            attention_resolutions,
            dropout=0,
            channel_mult=(1, 2, 4, 8),
            conv_resample=True,
            dims=2,
            num_classes=None,
            use_checkpoint=False,
            use_fp16=False,
            num_heads=-1,
            num_head_channels=-1,
            num_heads_upsample=-1,
            use_scale_shift_norm=False,
            resblock_updown=False,
            use_new_attention_order=False,
            use_spatial_transformer=False,  # custom transformer support
            transformer_depth=1,  # custom transformer support
            context_dim=None,  # custom transformer support
            n_embed=None,  # custom support for prediction of discrete ids into codebook of first stage vq model
            legacy=True,
            disable_self_attentions=None,
            num_attention_blocks=None,
            disable_middle_self_attn=False,
            use_linear_in_transformer=False,
    ):
        super().__init__()
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
0
2023-12-16 03:31:33+00:00
24k
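The `time_embed` MLP and `input_blocks` built in the constructor above follow the standard OpenAI-UNet pattern. Since `ReferenceNet.forward` is not part of this record, the fragment below is only a sketch of how such modules are conventionally wired together; the `context` argument that `TimestepEmbedSequential` can also route is omitted for brevity.

# Hypothetical sketch of the usual forward-pass prelude for this UNet family;
# ReferenceNet's actual forward is not shown in this record.
def forward_prelude(self, x, timesteps):
    # Sinusoidal embedding of shape (B, model_channels), then the 2-layer MLP.
    t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
    emb = self.time_embed(t_emb)      # (B, 4 * model_channels)
    h = x.type(self.dtype)
    hs = []
    for module in self.input_blocks:  # TimestepEmbedSequential feeds emb into each ResBlock
        h = module(h, emb)
        hs.append(h)                  # skip connections for the decoder half
    return h, hs, emb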
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
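The mIoU reported by `ClassicalSemSegEvaluator.evaluate` above boils down to a few lines of confusion-matrix algebra. The self-contained sketch below replays that arithmetic on a toy two-class matrix (the numbers are made up for illustration): rows index predictions and columns index ground truth, matching the evaluator's `np.bincount` update, with the last row/column reserved for the ignore label.

import numpy as np

# Toy (num_classes + 1) x (num_classes + 1) confusion matrix, conf[pred, gt];
# the trailing row/column holds the ignore label, as in the evaluator.
conf = np.array([[50, 10, 0],
                 [ 5, 35, 0],
                 [ 0,  0, 0]], dtype=np.int64)

tp = conf.diagonal()[:-1].astype(float)              # per-class true positives
pos_gt = conf[:-1, :-1].sum(axis=0).astype(float)    # ground-truth pixel counts
pos_pred = conf[:-1, :-1].sum(axis=1).astype(float)  # predicted pixel counts
union = pos_gt + pos_pred - tp
iou = tp / union                                     # per-class IoU
print("IoU:", iou, "mIoU:", iou.mean())              # IoU ~[0.769, 0.700], mIoU ~0.735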
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
14555
evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg) add_prerocessing_training_set_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) add_repeat_factors(cfg) cfg.freeze() default_setup(cfg, args) if not args.eval_only:
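The PersoEvalHook wired into build_hooks above runs evaluation every cfg.TEST.EVAL_PERIOD steps but defers the final evaluation from after_step to after_train. A small standalone sketch of the resulting schedule (assuming trainer.iter counts 0 .. max_iter - 1, as in detectron2):

# Sketch: which iterations trigger evaluation for the PersoEvalHook shown above.
def eval_iterations(period, max_iter, eval_after_train=True):
    iters = []
    for it in range(max_iter):          # trainer.iter goes 0 .. max_iter - 1
        next_iter = it + 1
        if period > 0 and next_iter % period == 0 and next_iter != max_iter:
            iters.append(next_iter)     # after_step path
    if eval_after_train:
        iters.append(max_iter)          # after_train path
    return iters

print(eval_iterations(period=5000, max_iter=20000))
# -> [5000, 10000, 15000, 20000]; the eval at iter 20000 runs in after_train,
# so a training run that crashes early never reports a final score.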
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg) add_prerocessing_training_set_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) add_repeat_factors(cfg) cfg.freeze() default_setup(cfg, args) if not args.eval_only:
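Trainer.build_optimizer above implements "full_model" gradient clipping by subclassing the optimizer so that every step first clips one global norm across all parameter groups (detectron2's built-in clipping is per-group). A standalone sketch of that pattern; with_full_model_clipping is a hypothetical helper name:

import itertools
import torch

def with_full_model_clipping(optim_cls, clip_norm_val):
    # Subclass the optimizer: clip the global grad norm over *all* param
    # groups before delegating to the real step().
    class FullModelGradientClippingOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return FullModelGradientClippingOptimizer

model = torch.nn.Linear(8, 2)
opt = with_full_model_clipping(torch.optim.AdamW, clip_norm_val=0.01)(
    model.parameters(), lr=1e-4
)
loss = model(torch.randn(4, 8)).sum()
loss.backward()
opt.step()  # gradients are clipped to global norm 0.01 before the update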
setup_wandb(cfg, args)
11
2023-12-15 15:40:58+00:00
24k
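End of this record. As context for how a record with these fields is consumed, a toy sketch of the usual next-line-completion setup; the record literal below is illustrative only, not a real dataset entry:

# Sketch: consuming one record of this dataset for next-line prediction.
# cropped_code ends right before next_line; gold_snippet_index points into context.
def build_example(record):
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    target = record["next_line"].strip()
    gold_context = record["context"][record["gold_snippet_index"]]
    return prompt, target, gold_context["identifier"]

record = {
    "import_statement": "from clouds.utils import setup_wandb, WandbWriter",
    "cropped_code": "    if not args.eval_only:",
    "next_line": "        setup_wandb(cfg, args)",
    "context": [{"identifier": "setup_wandb"}],
    "gold_snippet_index": 0,
}
prompt, target, gold_id = build_example(record)
assert target == "setup_wandb(cfg, args)" and gold_id == "setup_wandb"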
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
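The MultiheadAttention and RelationalMemory snippets in the context list that follows both sparsify attention by keeping only the top-k post-softmax weights per query, multiplying by a 0/1 mask without renormalizing. A standalone sketch of that operation:

import torch

def topk_mask_attention(attn_probs, k):
    # Zero out everything except the k largest weights per query row.
    # The surviving weights are NOT renormalized, matching the snippets below.
    dim = attn_probs.dim() - 1
    top = torch.topk(attn_probs, k=k, dim=dim)
    mask = torch.zeros_like(attn_probs)
    mask.scatter_(dim, top.indices, 1.0)
    return attn_probs * mask

probs = torch.softmax(torch.randn(2, 4, 6), dim=-1)   # (heads, tgt_len, src_len)
sparse = topk_mask_attention(probs, k=3)
assert (sparse > 0).sum(dim=-1).eq(3).all()           # exactly 3 weights survive per query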
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(normalized_shape, eps, elementwise_affine)\n return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)" }, { "identifier": "MultiheadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py", "snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv=False,\n add_zero_attn=False,\n self_attention=False,\n encoder_decoder_attention=False,\n q_noise=0.0,\n qn_block_size=8,\n nblocks=1,\n top_k_ratio=None,\n use_value_competition=True,\n shared_memory_attention = False,\n use_topk = False,\n topk = 3,\n num_steps = 5,\n mem_slots = 4,\n null_attention = False,\n regressive = False\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout_module = FairseqDropout(\n dropout, module_name=self.__class__.__name__\n )\n\n self.head_dim = embed_dim // num_heads\n self.shared_memory_attention = shared_memory_attention\n\n print('total heads', self.num_heads)\n print('head dim', self.head_dim)\n\n self.use_topk = use_topk\n self.topk = topk\n\n print('use topk?' + str(self.use_topk))\n print('topk:'+str(self.topk))\n\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, (\n \"Self-attention requires query, key and \" \"value to be of the same size\"\n )\n if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?\n self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.v_proj = quant_noise(GroupLinearLayer(self.vdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.q_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.out_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n if self.shared_memory_attention:\n self.bias_k_memory = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v_memory = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n self.bias_k_memory = self.bias_v_memory = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.tpu = False\n\n # 这里表示,如果共享memory_attention的话\n if self.shared_memory_attention:\n print('MEM SLOTS:' + str(mem_slots))\n print('Null attention:' + str(null_attention))\n print('USING SHARED MEMORY ATTENTION +++++++++')\n #self.num_heads = 1\n self.regressive 
= regressive\n if not regressive: \n self.relational_memory = RelationalMemory(\n mem_slots=mem_slots,\n head_size=self.head_dim , #128\n input_size=embed_dim,\n output_size=embed_dim,\n num_heads=self.num_heads, #1\n num_blocks=1,\n forget_bias=1,\n input_bias=0,\n gate_style=\"unit\",\n attention_mlp_layers=1,\n key_size=32,\n return_all_outputs=False,\n use_topk = self.use_topk,\n topk = self.topk,\n num_steps = num_steps, \n null_attention = null_attention\n )\n else:\n print('USING AUTO REGRESSIVE')\n self.relational_memory = RelationalMemoryRegressive(\n mem_slots=mem_slots,\n head_size=self.head_dim ,\n input_size=embed_dim,\n output_size=embed_dim,\n num_heads=self.num_heads,\n num_blocks=1,\n forget_bias=1,\n input_bias=0,\n gate_style=\"unit\",\n attention_mlp_layers=4,\n key_size=32,\n return_all_outputs=False,\n use_topk = self.use_topk,\n topk = self.topk,\n num_steps = num_steps,\n null_attention = False\n )\n self.memory_size = 128 #self.head_dim * self.num_heads\n '''\n self.mem_att = MHAMemory(\n n_head=4,\n d_model_read=embed_dim,\n d_model_write=self.memory_size,\n d_model_out=embed_dim,\n d_k=32,\n d_v=32,\n grad_sparse=False,\n )\n '''\n self.memory = None # placeholder slot, since self.memory is going to be shared\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def prepare_for_tpu_(self, **kwargs):\n self.tpu = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n if self.shared_memory_attention:\n nn.init.xavier_uniform_(self.k_proj_memory.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj_memory.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj_memory.weight, gain=1 / math.sqrt(2))\n\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n #if self.shared_memory_attention:\n # nn.init.xavier_uniform_(self.k_proj_memory.weight)\n # nn.init.xavier_uniform_(self.v_proj_memory.weight)\n # nn.init.xavier_uniform_(self.q_proj_memory.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n #if self.shared_memory_attention:\n # nn.init.xavier_uniform_(self.out_proj_memory.weight)\n \n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.)\n\n #if self.shared_memory_attention and self.out_proj_memory.bias is not None:\n # nn.init.constant_(self.out_proj.bias, 0.)\n \n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n #if self.shared_memory_attention:\n # if self.bias_k is not None:\n # nn.init.xavier_normal_(self.bias_k_memory)\n # if self.bias_v is not None:\n # nn.init.xavier_normal_(self.bias_v_memory)\n\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n comp = None,\n memory = None\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): 
mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if (\n not self.onnx_trace\n and not self.tpu # don't use PyTorch version on TPUs\n and incremental_state is None\n and not static_kv\n # A workaround for quantization to work. Otherwise JIT compilation\n # treats bias in linear module as method.\n and not torch.jit.is_scripting()\n and False\n ):\n assert key is not None and value is not None\n if self.shared_memory_attention:\n memory,_ = F.multi_head_attention_forward(\n memory,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj_memory.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj_memory.weight,\n self.out_proj_memory.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_memory.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n out,weights = F.multi_head_attention_forward(\n query,\n memory,\n memory,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj_memory.bias, self.v_proj_memory.bias)),\n self.bias_k_memory,\n self.bias_v_memory,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj_memory.weight,\n v_proj_weight=self.v_proj_memory.weight,\n )\n else:\n out, weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n\n ) \n\n return out, memory, weights\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n # if memory attention is not shared\n if not self.shared_memory_attention:\n\n 
t1 = time.time()\n\n if self.self_attention:\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n \n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n\n if comp is not None:\n v = v * comp\n #v_memory = v_memory * comp\n q *= self.scaling\n #q_memory *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = (\n q.contiguous()\n .view(tgt_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if k is not None:\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if v is not None:\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n \n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if 
key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(\n key_padding_mask\n ),\n ],\n dim=1,\n )\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n if not self.tpu:\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\")\n )\n else:\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n \n # this is the branch that runs\n attn_weights_float = utils.softmax(\n attn_weights, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = self.dropout_module(attn_weights)\n\n assert v is not None\n if self.use_topk:\n k = torch.topk(attn_probs, dim = 2, k = self.topk)\n mask = torch.zeros(attn_probs.size()).to(attn_probs.device)\n mask.scatter_(2, k.indices, 1)\n attn_probs = attn_probs * mask\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, self.num_heads, tgt_len, src_len\n ).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n #print('time taken by default mha:' + str(time.time() - t1))\n return attn, None, attn_weights\n \n else: # shared memory attention mechanism\n t1 = time.time()\n\n # this is the shared-memory case\n if self.memory is None:\n self.memory = self.relational_memory.initial_state(query.size(1), query.size(0)).to(query.device)\n\n self.memory = self.memory.to(query.device)\n\n #print(self.memory.size())\n \n \n key = key.transpose(1, 0)\n\n #print(key.size())\n #memory = self.memory[:key.size(0)]\n #print(self.memory.size())\n\n t2 = time.time()\n\n #print(self.memory)\n\n # self.memory is just a mechanism for updating the memory; it is not the workspace itself!!! has the lm-workspace code got this wrong?\n # so, about this self.memory:\n # the memory is updated here,\n # using relational_memory to update the memory held in the workspace\n _,_, self.memory, out_hx_mem_new = self.relational_memory(\n inputs=key,\n memory=self.memory#.cuda(),\n )\n #print('time taken by relational:' + str(time.time() - t2))\n\n\n\n #query = query.transpose(1, 0)\n #if self.regressive:\n # B, T, D = query.size()\n # query = query.reshape(B * T, -1).unsqueeze(1)\n #out_hx_mem_new, _, _ = self.mem_att(\n # query,#.reshape((bsz, self.num_blocks_out, self.block_size_out)),\n # self.memory,\n # self.memory,\n # )\n\n #z = torch.zeros(self.memory.size(0) - memory.size(0), memory.size(1), memory.size(2)).to(memory.device)\n #memory = torch.cat((memory, z), dim = 0)\n #self.memory = self.memory + memory\n #print('time taken by shared mha:' + str(time.time() - t1))\n #if self.regressive:\n # out_hx_mem_new = out_hx_mem_new.squeeze(1)\n # out_hx_mem_new = out_hx_mem_new.reshape(B, T, -1)\n\n # the memory returned here is actually not of much use anymore, I think\n return out_hx_mem_new.transpose(0, 1), memory, None\n \"\"\"\n\n tgt_len = memory.size(0)\n src_len = key.size(0)\n q_memory = self.q_proj_memory(memory)\n k = self.k_proj(key)\n v = self.v_proj(value)\n\n q_memory = (\n q_memory.contiguous()\n .view(memory.size(0), bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n \n\n attn_weights_1 = torch.bmm(q_memory, k.transpose(1, 2))\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights_1 = attn_weights_1.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights_1 = attn_weights_1.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\")\n )\n\n attn_weights_float_1 = utils.softmax(\n attn_weights_1, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights_1 = attn_weights_float_1.type_as(attn_weights_1)\n attn_probs_1 = self.dropout_module(attn_weights_1)\n\n assert v is not None\n memory = torch.bmm(attn_probs_1, v)\n\n memory = memory.permute(1, 0, 2)\n memory = memory.reshape(memory.size(0), bsz, self.num_heads, -1)\n memory = memory.reshape(memory.size(0), bsz, -1)\n\n\n\n q = self.q_proj(query)\n \n k_memory = self.k_proj_memory(memory)\n v_memory = self.v_proj_memory(memory)\n\n q = (\n q.contiguous()\n .view(src_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n k_memory = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n v_memory = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n attn_weights_2 = torch.bmm(q, k_memory.transpose(1, 2))\n \n attn_weights_float_2 = utils.softmax(\n attn_weights_2, dim=-1, onnx_trace=self.onnx_trace\n )\n \n attn_weights_2 = attn_weights_float_2.type_as(attn_weights_2)\n attn_probs_2 = self.dropout_module(attn_weights_2)\n\n out = torch.bmm(attn_probs_2, v)\n out = out.transpose(0, 1).contiguous().view(src_len, bsz, embed_dim)\n return out, memory, None\n \"\"\"\n \n # used when parameters are shared, or when memory attn is shared\n def init_memory(self, bs, ts = None, device = None):\n if not self.regressive:\n self.memory = self.relational_memory.initial_state(bs).to(device)\n else:\n self.memory = self.relational_memory.initial_state(bs, ts).to(device)\n\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n 
batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [filler.float(), key_padding_mask.float()], dim=1\n )\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][\n dim : 2 * dim\n ]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value" }, { 
"identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n self.attn_log = None\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size * num_steps, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n\n def multihead_attention(self, input, memory):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 
1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def print_log(self):\n print(self.attn_log)\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: 
Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. 
/ math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "MemoryAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/basic_mha.py", "snippet": "class MemoryAttention(nn.Module):\n def __init__(self, n_blocks_query, n_blocks_val, dim_query, dim_val, n_heads=8):\n super(MemoryAttention, self).__init__()\n\n self.n_heads = n_heads\n self.n_blocks_val = n_blocks_val\n self.dim_val = dim_val\n self.block_dim_val = dim_val // self.n_blocks_val\n\n self.n_blocks_query = n_blocks_query\n self.dim_query = dim_query\n self.block_dim_query = dim_query // self.n_blocks_query\n\n self.head_dim = 64\n self.scale = self.head_dim ** -0.5\n\n #self.n_blocks_val * self.block_dim_val\n\n self.query_net = GroupLinearLayer(self.block_dim_query, self.head_dim * self.n_heads, n_blocks_query)\n self.key_net = GroupLinearLayer(self.block_dim_val, self.head_dim * self.n_heads, n_blocks_val)\n self.value_net = GroupLinearLayer(self.block_dim_val, self.head_dim * self.n_heads, n_blocks_val)\n self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim_query, n_blocks_query)\n\n def forward(self, q, kv):\n\n #comes in as: bs, pos*emb.\n #positions_attend x T*bs x emb\n\n\n #q = q.permute(1,0,2)\n #kv = kv.permute(1,0,2)\n\n #print('kv shape after permute', kv.shape)\n\n seq_len_q,bsz,_ = q.shape\n seq_len_v,bsz,_ = kv.shape\n\n q = q.reshape((seq_len_q, bsz, self.n_blocks_query * self.block_dim_query))\n\n kv = kv.reshape((seq_len_v, bsz, self.n_blocks_val * self.block_dim_val))\n\n q = self.query_net(q).view(seq_len_q, bsz, self.n_blocks_query, self.n_heads, self.head_dim)\n k = self.key_net(kv).view(seq_len_v, bsz, self.n_blocks_val, self.n_heads, self.head_dim)\n v = self.value_net(kv).view(seq_len_v, bsz, self.n_blocks_val, self.n_heads, self.head_dim)\n\n q = q.transpose(2,3) * self.scale\n k = k.transpose(2,3)\n v = v.transpose(2,3)\n score = torch.matmul(q, k.transpose(3,4))\n #print('score shape', score.shape)\n score = F.softmax(score, dim=-1)\n out = torch.matmul(score, v).transpose(2,3)\n #print('out shape', out.shape)\n score = score.mean(dim=2)\n\n out = out.reshape(seq_len_q, bsz, self.n_blocks_query * self.head_dim * self.n_heads)\n out = self.final(out)\n out = out.view(seq_len_q, bsz, self.dim_query)\n\n\n return out, score" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks 
for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n 
):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" } ]
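The `quant_noise` snippet above works by registering a forward pre-hook that, at each training step, zeroes a random subset of fixed-size weight blocks and rescales the whole weight matrix by 1/(1-p). A minimal usage sketch of that wrapper (the layer sizes are illustrative, not taken from the repository above):

import torch
import torch.nn as nn

# Hypothetical demo of the quant_noise wrapper defined in the snippet above.
# in_features=64 must be a multiple of block_size=8 (the wrapper asserts this).
layer = quant_noise(nn.Linear(64, 32), p=0.1, block_size=8)
layer.train()                   # the pre-hook only injects noise in training mode
y = layer(torch.randn(4, 64))   # each 8-wide weight block is dropped with prob. 0.1,
                                # and surviving weights are scaled by 1/(1-p)
assert y.shape == (4, 32)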
from typing import Dict, List, Optional from .layer_norm import LayerNorm from .multihead_attention import MultiheadAttention from .relational_memory import RelationalMemory from .group_linear_layer import GroupLinearLayer from .basic_mha import MemoryAttention from .quant_noise import quant_noise from .fairseq_dropout import FairseqDropout from torch import Tensor import torch import torch.nn as nn import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils import random import torch.nn.functional as F
15,532
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args):
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args):
return MultiheadAttention(
1
2023-12-15 13:13:01+00:00
24k
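The `TransformerEncoderLayerVanilla` docstring in the code fields above contrasts the original post-norm ordering (`dropout -> add residual -> layernorm`) with the tensor2tensor pre-norm variant toggled by `args.encoder_normalize_before`. A minimal sketch of the two residual orderings for a generic sublayer (a paraphrase of the pattern, not the repository's exact code):

import torch.nn.functional as F

def residual_sublayer(x, sublayer, layer_norm, dropout_p, normalize_before, training=True):
    # Pre-norm (tensor2tensor): layernorm -> sublayer -> dropout -> add residual
    if normalize_before:
        return x + F.dropout(sublayer(layer_norm(x)), p=dropout_p, training=training)
    # Post-norm (original paper): sublayer -> dropout -> add residual -> layernorm
    return layer_norm(x + F.dropout(sublayer(x), p=dropout_p, training=training))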
m-abr/FCPCodebase
world/World.py
[ { "identifier": "Logger", "path": "logs/Logger.py", "snippet": "class Logger():\n _folder = None\n\n def __init__(self, is_enabled:bool, topic:str) -> None:\n self.no_of_entries = 0 \n self.enabled = is_enabled\n self.topic = topic\n\n def write(self, msg:str, timestamp:bool=True, step:int=None) -> None:\n '''\n Write `msg` to file named `self.topic`\n\n Parameters\n ----------\n msg : str\n message to be written\n step : int\n simulation step is written before the message to provide additional information\n default is `None` (nothing is written before the message)\n '''\n if not self.enabled: return\n\n # The log folder is only created if needed\n if Logger._folder is None: \n rnd = ''.join(random.choices(ascii_uppercase, k=6)) # Useful if multiple processes are running in parallel \n Logger._folder = \"./logs/\" + datetime.now().strftime(\"%Y-%m-%d_%H.%M.%S__\") + rnd + \"/\"\n print(\"\\nLogger Info: see\",Logger._folder)\n Path(Logger._folder).mkdir(parents=True, exist_ok=True)\n\n self.no_of_entries += 1\n\n with open(Logger._folder + self.topic + \".log\", 'a+') as f:\n prefix = \"\"\n write_step = step is not None\n if timestamp or write_step:\n prefix = \"{\"\n if timestamp: \n prefix += datetime.now().strftime(\"%a %H:%M:%S\")\n if write_step: prefix += \" \"\n if write_step:\n prefix += f'Step:{step}'\n prefix += \"} \"\n f.write(prefix + msg + \"\\n\")" }, { "identifier": "Matrix_4x4", "path": "math_ops/Matrix_4x4.py", "snippet": "class Matrix_4x4():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_4x4( ) # create identity matrix\n b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix\n c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix\n d = Matrix_4x4( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(4)\n elif type(matrix) == Matrix_4x4: \n self.m = np.copy(matrix.m)\n elif type(matrix) == Matrix_3x3: \n self.m = np.identity(4)\n self.m[0:3,0:3] = matrix.m\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (4,4) #reshape if needed, throw error if impossible\n\n\n @classmethod\n def from_translation(cls, translation_vec):\n '''\n Create transformation matrix from translation_vec translation\n e.g. Matrix_4x4.from_translation((a,b,c))\n output: [[1,0,0,a],[0,1,0,b],[0,0,1,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n @classmethod\n def from_3x3_and_translation(cls, mat3x3:Matrix_3x3, translation_vec):\n '''\n Create transformation matrix from rotation matrix (3x3) and translation\n e.g. 
Matrix_4x4.from_3x3_and_translation(r,(a,b,c)) \n        output: [[r00,r01,r02,a],[r10,r11,r12,b],[r20,r21,r22,c],[0,0,0,1]]\n        '''\n        mat = np.identity(4)\n        mat[0:3,0:3] = mat3x3.m\n        mat[0:3,3] = translation_vec\n        return cls(mat)\n\n    def translate(self, translation_vec, in_place=False):\n        '''\n        Translates the current transformation matrix\n\n        Parameters\n        ----------\n        translation_vec : array_like, length 3\n            translation vector\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n\n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        vec = np.array([*translation_vec,1])# conversion to 4D vector\n        np.matmul(self.m, vec, out=vec) # compute only 4th column\n\n        if in_place:\n            self.m[:,3] = vec\n            return self\n        else:\n            ret = Matrix_4x4(self.m)\n            ret.m[:,3] = vec\n            return ret\n\n\n    def get_translation(self):\n        ''' Get translation vector (x,y,z) '''\n        return self.m[0:3,3] # return view\n\n    def get_x(self):\n        return self.m[0,3]\n\n    def get_y(self):\n        return self.m[1,3]\n\n    def get_z(self):\n        return self.m[2,3]\n\n    def get_rotation_4x4(self):\n        ''' Get Matrix_4x4 without translation ''' \n        mat = Matrix_4x4(self)\n        mat.m[0:3,3] = 0\n        return mat\n\n    def get_rotation(self):\n        ''' Get rotation Matrix_3x3 '''\n        return Matrix_3x3(self.m[0:3,0:3])\n\n    def get_distance(self):\n        ''' Get translation vector length '''\n        return np.linalg.norm(self.m[0:3,3])\n\n    def get_roll_deg(self):\n        ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n        if self.m[2,1] == 0 and self.m[2,2] == 0: \n            return 180\n        return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n    def get_pitch_deg(self):\n        ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n        return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n    def get_yaw_deg(self):\n        ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n        if self.m[1,0] == 0 and self.m[0,0] == 0: \n            return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n        return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n \n    def get_inclination_deg(self):\n        ''' Get inclination of z-axis in relation to reference z-axis '''\n        return 90 - (asin(np.clip(self.m[2,2],-1,1)) * 180 / pi)\n\n    def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n        '''\n        Rotates the current transformation matrix\n\n        Parameters\n        ----------\n        rotation_vec : array_like, length 3\n            rotation vector\n        rotation_deg : float\n            rotation in degrees\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        return self.rotate_rad(rotation_vec, rotation_deg * (pi/180), in_place)\n\n    \n    def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n        '''\n        Rotates the current transformation matrix\n\n        Parameters\n        ----------\n        rotation_vec : array_like, length 3\n            rotation vector\n        rotation_rad : float\n            rotation in radians\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n\n        if rotation_rad == 0: \n            return self if in_place else Matrix_4x4(self)\n\n        # shortcuts for rotation around 1 axis\n        if 
rotation_vec[0]==0:\n            if rotation_vec[1]==0:\n                if rotation_vec[2]==1:\n                    return self.rotate_z_rad(rotation_rad, in_place)\n                elif rotation_vec[2]==-1:\n                    return self.rotate_z_rad(-rotation_rad, in_place)\n            elif rotation_vec[2]==0:\n                if rotation_vec[1]==1:\n                    return self.rotate_y_rad(rotation_rad, in_place)\n                elif rotation_vec[1]==-1:\n                    return self.rotate_y_rad(-rotation_rad, in_place)\n        elif rotation_vec[1]==0 and rotation_vec[2]==0:\n            if rotation_vec[0]==1:\n                return self.rotate_x_rad(rotation_rad, in_place)\n            elif rotation_vec[0]==-1:\n                return self.rotate_x_rad(-rotation_rad, in_place)\n        \n        c = np.math.cos(rotation_rad)\n        c1 = 1 - c\n        s = np.math.sin(rotation_rad)\n        x = rotation_vec[0]\n        y = rotation_vec[1]\n        z = rotation_vec[2]\n        xxc1 = x * x * c1\n        yyc1 = y * y * c1\n        zzc1 = z * z * c1\n        xyc1 = x * y * c1\n        xzc1 = x * z * c1\n        yzc1 = y * z * c1\n        xs = x * s\n        ys = y * s\n        zs = z * s\n\n        mat = np.array([\n        [xxc1 + c,  xyc1 - zs, xzc1 + ys, 0],\n        [xyc1 + zs, yyc1 + c,  yzc1 - xs, 0],\n        [xzc1 - ys, yzc1 + xs, zzc1 + c,  0],\n        [0, 0, 0, 1]])\n\n        return self.multiply(mat, in_place)\n\n\n    def rotate_x_rad(self, rotation_rad, in_place=False):\n        '''\n        Rotates the current transformation matrix around the x-axis\n\n        Parameters\n        ----------\n        rotation_rad : float\n            rotation in radians\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        if rotation_rad == 0: \n            return self if in_place else Matrix_4x4(self)\n        \n        c = np.math.cos(rotation_rad)\n        s = np.math.sin(rotation_rad)\n\n        mat = np.array([\n        [1, 0, 0, 0],\n        [0, c,-s, 0],\n        [0, s, c, 0],\n        [0, 0, 0, 1]])\n\n        return self.multiply(mat, in_place)\n\n    def rotate_y_rad(self, rotation_rad, in_place=False):\n        '''\n        Rotates the current transformation matrix around the y-axis\n\n        Parameters\n        ----------\n        rotation_rad : float\n            rotation in radians\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        if rotation_rad == 0: \n            return self if in_place else Matrix_4x4(self)\n        \n        c = np.math.cos(rotation_rad)\n        s = np.math.sin(rotation_rad)\n\n        mat = np.array([\n        [ c, 0, s, 0],\n        [ 0, 1, 0, 0],\n        [-s, 0, c, 0],\n        [ 0, 0, 0, 1]])\n\n        return self.multiply(mat, in_place)\n\n    def rotate_z_rad(self, rotation_rad, in_place=False):\n        '''\n        Rotates the current transformation matrix around the z-axis\n\n        Parameters\n        ----------\n        rotation_rad : float\n            rotation in radians\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        if rotation_rad == 0: \n            return self if in_place else Matrix_4x4(self)\n        \n        c = np.math.cos(rotation_rad)\n        s = np.math.sin(rotation_rad)\n\n        mat = np.array([\n        [ c,-s, 0, 0],\n        [ s, c, 0, 0],\n        [ 0, 0, 1, 0],\n        [ 0, 0, 0, 1]])\n\n        return self.multiply(mat, in_place)\n\n    def rotate_x_deg(self, rotation_deg, in_place=False):\n        '''\n        Rotates the current transformation matrix around the x-axis\n\n        Parameters\n        ----------\n        rotation_deg : float\n            rotation in degrees\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix 
is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n    def rotate_y_deg(self, rotation_deg, in_place=False):\n        '''\n        Rotates the current transformation matrix around the y-axis\n\n        Parameters\n        ----------\n        rotation_deg : float\n            rotation in degrees\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n    def rotate_z_deg(self, rotation_deg, in_place=False):\n        '''\n        Rotates the current transformation matrix around the z-axis\n\n        Parameters\n        ----------\n        rotation_deg : float\n            rotation in degrees\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n        return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n    def invert(self, in_place=False):\n        '''\n        Inverts the current transformation matrix\n\n        Parameters\n        ----------\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default)\n        \n        Returns\n        -------\n        result : Matrix_4x4 \n            self is returned if in_place is True\n        '''\n\n        if in_place:\n            self.m = np.linalg.inv(self.m)\n            return self\n        else:\n            return Matrix_4x4(np.linalg.inv(self.m))\n\n    def multiply(self,mat, in_place=False):\n        '''\n        Multiplies the current transformation matrix by mat\n\n        Parameters\n        ----------\n        mat : Matrix_4x4 or array_like\n            multiplier matrix or 3D vector\n        in_place: bool, optional\n            * True: the internal matrix is changed in-place\n            * False: a new matrix is returned and the current one is not changed (default, if mat is a 4x4 matrix)\n        \n        Returns\n        -------\n        result : Matrix_4x4 | array_like\n            Matrix_4x4 is returned if mat is a matrix (self is returned if in_place is True); \n            a 3D vector is returned if mat is a vector\n        '''\n        if type(mat) == Matrix_4x4: \n            mat = mat.m\n        else:\n            mat = np.asarray(mat) # conversion to array, if needed\n            if mat.ndim == 1: # multiplication by 3D vector\n                vec = np.append(mat,1) # conversion to 4D vector\n                return np.matmul(self.m, vec)[0:3] # conversion to 3D vector\n\n        if in_place:\n            np.matmul(self.m, mat, self.m)\n            return self\n        else:\n            return Matrix_4x4(np.matmul(self.m, mat))\n\n    def __call__(self,mat, is_spherical=False):\n        '''\n        Multiplies the current transformation matrix by mat and returns a new matrix or vector\n\n        Parameters\n        ----------\n        mat : Matrix_4x4 or array_like\n            multiplier matrix or 3D vector\n        is_spherical : bool\n            only relevant if mat is a 3D vector, True if it uses spherical coordinates\n        \n        Returns\n        -------\n        result : Matrix_4x4 | array_like\n            Matrix_4x4 is returned if mat is a matrix; \n            a 3D vector is returned if mat is a vector\n        '''\n\n        if is_spherical and mat.ndim == 1: mat = M.deg_sph2cart(mat)\n        return self.multiply(mat,False)" }, { "identifier": "Draw", "path": "world/commons/Draw.py", "snippet": "class Draw():\n    _socket = None\n\n    def __init__(self, is_enabled:bool, unum:int, host:str, port:int) -> None:\n        self.enabled = is_enabled \n        self._is_team_right = None\n        self._unum = unum \n        self._prefix = 
f'?{unum}_'.encode() # temporary prefix that should never be used in normal circumstances\n \n #Create one socket for all instances\n if Draw._socket is None:\n Draw._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM )\n Draw._socket.connect((host, port))\n Draw.clear_all()\n\n\n def set_team_side(self, is_right):\n ''' Called by world parser to switch side '''\n '''\n Generate an appropriate player ID\n RoboViz has a bug/feature: we send \"swap buffers for player: 'l_1' and RoboViz\n will swap every buffer that contains 'l_1' in the name, including \n 'l_10' and 'l_11'. To avoid that, we swap the separator to 'l-10', 'l-11'\n '''\n self._is_team_right = is_right\n self._prefix = f\"{'r' if is_right else 'l'}{'_' if self._unum < 10 else '-'}{self._unum}_\".encode() #e.g. b'l_5', b'l-10'\n\n\n @staticmethod\n def _send(msg, id, flush):\n ''' Private method to send message if RoboViz is accessible '''\n try:\n if flush:\n Draw._socket.send(msg + id + b'\\x00\\x00\\x00' + id + b'\\x00')\n else:\n Draw._socket.send(msg + id + b'\\x00')\n except ConnectionRefusedError:\n pass\n\n \n def circle(self, pos2d, radius, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw circle\n\n Examples\n ----------\n Circle in 2D (z=0): circle((-1,2), 3, 2, Draw.Color.red, \"my_circle\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos2d).any(), \"Argument 'pos2d' contains 'nan' values\"\n\n if self._is_team_right:\n pos2d = (-pos2d[0],-pos2d[1]) \n\n msg = b'\\x01\\x00' + (\n f'{f\"{pos2d[0] :.4f}\":.6s}'\n f'{f\"{pos2d[1] :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def line(self, p1, p2, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw line\n\n Examples\n ----------\n Line in 3D: line((0,0,0), (0,0,2), 3, Draw.Color.red, \"my_line\") \n Line in 2D (z=0): line((0,0), (0,1), 3, Draw.Color.red, \"my_line\") \n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(p1).any(), \"Argument 'p1' contains 'nan' values\"\n assert not np.isnan(p2).any(), \"Argument 'p2' contains 'nan' values\"\n\n z1 = p1[2] if len(p1)==3 else 0\n z2 = p2[2] if len(p2)==3 else 0\n\n if self._is_team_right: \n p1 = (-p1[0],-p1[1],p1[2]) if len(p1)==3 else (-p1[0],-p1[1])\n p2 = (-p2[0],-p2[1],p2[2]) if len(p2)==3 else (-p2[0],-p2[1])\n\n msg = b'\\x01\\x01' + (\n f'{f\"{p1[0] :.4f}\":.6s}'\n f'{f\"{p1[1] :.4f}\":.6s}'\n f'{f\"{z1 :.4f}\":.6s}'\n f'{f\"{p2[0] :.4f}\":.6s}'\n f'{f\"{p2[1] :.4f}\":.6s}'\n f'{f\"{z2 :.4f}\":.6s}'\n f'{f\"{thickness :.4f}\":.6s}').encode() + color\n\n Draw._send(msg, self._prefix + id.encode(), flush)\n \n\n def point(self, pos, size, color:bytes, id:str, flush=True):\n ''' \n Draw point\n\n Examples\n ----------\n Point in 3D: point((1,1,1), 3, Draw.Color.red, \"my_point\")\n Point in 2D (z=0): point((1,1), 3, Draw.Color.red, \"my_point\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x02' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{size :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def sphere(self, pos, radius, color:bytes, id:str, flush=True):\n ''' \n Draw sphere\n\n Examples\n ----------\n Sphere in 3D: sphere((1,1,1), 3, Draw.Color.red, \"my_sphere\")\n Sphere in 2D (z=0): sphere((1,1), 3, Draw.Color.red, \"my_sphere\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert not np.isnan(pos).any(), \"Argument 'pos' contains 'nan' values\"\n\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x01\\x03' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}'\n f'{f\"{radius :.4f}\":.6s}').encode() + color\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def polygon(self, vertices, color:bytes, alpha:int, id:str, flush=True):\n ''' \n Draw polygon\n\n Examples\n ----------\n Polygon in 3D: polygon(((0,0,0),(1,0,0),(0,1,0)), Draw.Color.red, 255, \"my_polygon\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n assert 0<=alpha<=255, \"The alpha channel (degree of opacity) must be in range [0,255]\"\n\n if self._is_team_right: \n vertices = [(-v[0],-v[1],v[2]) for v in vertices]\n\n msg = b'\\x01\\x04' + bytes([len(vertices)]) + color + alpha.to_bytes(1,'big')\n\n for v in vertices:\n msg += (\n f'{f\"{v[0] :.4f}\":.6s}'\n f'{f\"{v[1] :.4f}\":.6s}'\n f'{f\"{v[2] :.4f}\":.6s}').encode()\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n\n def annotation(self, pos, text, color:bytes, id:str, flush=True):\n ''' \n Draw annotation\n\n Examples\n ----------\n Annotation in 3D: annotation((1,1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n Annotation in 2D (z=0): annotation((1,1), \"SOMEtext!\", Draw.Color.red, \"my_annotation\")\n '''\n if not self.enabled: return\n if type(text) != bytes: text = str(text).encode()\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. red: b'\\xFF\\x00\\x00'\"\n z = pos[2] if len(pos)==3 else 0\n\n if self._is_team_right: \n pos = (-pos[0],-pos[1],pos[2]) if len(pos)==3 else (-pos[0],-pos[1])\n\n msg = b'\\x02\\x00' + (\n f'{f\"{pos[0] :.4f}\":.6s}'\n f'{f\"{pos[1] :.4f}\":.6s}'\n f'{f\"{z :.4f}\":.6s}').encode() + color + text + b'\\x00'\n \n Draw._send(msg, self._prefix + id.encode(), flush)\n\n \n def arrow(self, p1, p2, arrowhead_size, thickness, color:bytes, id:str, flush=True):\n ''' \n Draw arrow\n\n Examples\n ----------\n Arrow in 3D: arrow((0,0,0), (0,0,2), 0.1, 3, Draw.Color.red, \"my_arrow\")\n Arrow in 2D (z=0): arrow((0,0), (0,1), 0.1, 3, Draw.Color.red, \"my_arrow\")\n '''\n if not self.enabled: return\n assert type(color)==bytes, \"The RGB color must be a bytes object, e.g. 
red: b'\\xFF\\x00\\x00'\"\n\n # No need to invert sides, the called shapes will handle that\n if len(p1)==2: p1 = M.to_3d(p1) \n else: p1 = np.asarray(p1)\n if len(p2)==2: p2 = M.to_3d(p2) \n else: p2 = np.asarray(p2)\n\n vec = p2-p1\n vec_size = np.linalg.norm(vec)\n if vec_size == 0: return #return without warning/error\n if arrowhead_size > vec_size: arrowhead_size = vec_size\n\n ground_proj_perpendicular = np.array([ vec[1], -vec[0], 0 ])\n\n if np.all(ground_proj_perpendicular == 0): #vertical arrow\n ground_proj_perpendicular = np.array([ arrowhead_size/2, 0, 0 ])\n else:\n ground_proj_perpendicular *= arrowhead_size/2 / np.linalg.norm(ground_proj_perpendicular)\n\n head_start = p2 - vec * (arrowhead_size/vec_size)\n head_pt1 = head_start + ground_proj_perpendicular\n head_pt2 = head_start - ground_proj_perpendicular\n\n self.line(p1,p2,thickness,color,id,False)\n self.line(p2,head_pt1,thickness,color,id,False)\n self.line(p2,head_pt2,thickness,color,id,flush)\n\n\n def flush(self, id):\n ''' Flush specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), False)\n\n def clear(self, id):\n ''' Clear specific drawing by ID '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix + id.encode(), True) #swap buffer twice\n\n\n def clear_player(self):\n ''' Clear all drawings made by this player '''\n if not self.enabled: return\n\n Draw._send(b'\\x00\\x00', self._prefix, True) #swap buffer twice\n\n\n @staticmethod\n def clear_all():\n ''' Clear all drawings of all players '''\n if Draw._socket is not None:\n Draw._send(b'\\x00\\x00\\x00\\x00\\x00',b'',False) #swap buffer twice using no id\n\n\n class Color():\n '''\n Based on X11 colors\n The names are restructured to make better suggestions\n '''\n pink_violet = b'\\xC7\\x15\\x85'\n pink_hot = b'\\xFF\\x14\\x93'\n pink_violet_pale = b'\\xDB\\x70\\x93'\n pink = b'\\xFF\\x69\\xB4'\n pink_pale = b'\\xFF\\xB6\\xC1'\n \n red_dark = b'\\x8B\\x00\\x00'\n red = b'\\xFF\\x00\\x00'\n red_brick = b'\\xB2\\x22\\x22'\n red_crimson = b'\\xDC\\x14\\x3C'\n red_indian = b'\\xCD\\x5C\\x5C'\n red_salmon = b'\\xFA\\x80\\x72'\n\n orange_red = b'\\xFF\\x45\\x00'\n orange = b'\\xFF\\x8C\\x00'\n orange_ligth = b'\\xFF\\xA5\\x00'\n\n yellow_gold = b'\\xFF\\xD7\\x00'\n yellow = b'\\xFF\\xFF\\x00'\n yellow_light = b'\\xBD\\xB7\\x6B'\n\n brown_maroon =b'\\x80\\x00\\x00'\n brown_dark = b'\\x8B\\x45\\x13'\n brown = b'\\xA0\\x52\\x2D'\n brown_gold = b'\\xB8\\x86\\x0B'\n brown_light = b'\\xCD\\x85\\x3F'\n brown_pale = b'\\xDE\\xB8\\x87'\n\n green_dark = b'\\x00\\x64\\x00' \n green = b'\\x00\\x80\\x00' \n green_lime = b'\\x32\\xCD\\x32' \n green_light = b'\\x00\\xFF\\x00' \n green_lawn = b'\\x7C\\xFC\\x00' \n green_pale = b'\\x90\\xEE\\x90' \n\n cyan_dark = b'\\x00\\x80\\x80' \n cyan_medium = b'\\x00\\xCE\\xD1' \n cyan = b'\\x00\\xFF\\xFF' \n cyan_light = b'\\xAF\\xEE\\xEE'\n\n blue_dark = b'\\x00\\x00\\x8B' \n blue = b'\\x00\\x00\\xFF' \n blue_royal = b'\\x41\\x69\\xE1' \n blue_medium = b'\\x1E\\x90\\xFF' \n blue_light = b'\\x00\\xBF\\xFF'\n blue_pale = b'\\x87\\xCE\\xEB'\n\n purple_violet = b'\\x94\\x00\\xD3' \n purple_magenta = b'\\xFF\\x00\\xFF' \n purple_light = b'\\xBA\\x55\\xD3' \n purple_pale = b'\\xDD\\xA0\\xDD'\n\n white = b'\\xFF\\xFF\\xFF'\n gray_10 = b'\\xE6\\xE6\\xE6'\n gray_20 = b'\\xCC\\xCC\\xCC'\n gray_30 = b'\\xB2\\xB2\\xB2' \n gray_40 = b'\\x99\\x99\\x99'\n gray_50 = b'\\x80\\x80\\x80'\n gray_60 = b'\\x66\\x66\\x66'\n gray_70 = b'\\x4C\\x4C\\x4C'\n gray_80 = 
b'\\x33\\x33\\x33'\n gray_90 = b'\\x1A\\x1A\\x1A'\n black = b'\\x00\\x00\\x00' \n\n @staticmethod\n def get(r,g,b):\n ''' Get RGB color (0-255) '''\n return bytes([int(r),int(g),int(b)])" }, { "identifier": "Other_Robot", "path": "world/commons/Other_Robot.py", "snippet": "class Other_Robot():\n def __init__(self, unum, is_teammate) -> None:\n self.unum = unum # convenient variable to indicate uniform number (same as other robot's index + 1)\n self.is_self = False # convenient flag to indicate if this robot is self\n self.is_teammate = is_teammate # convenient variable to indicate if this robot is from our team\n self.is_visible = False # True if this robot was seen in the last message from the server (it doesn't mean we know its absolute location)\n self.body_parts_cart_rel_pos = dict() # cartesian relative position of the robot's visible body parts\n self.body_parts_sph_rel_pos = dict() # spherical relative position of the robot's visible body parts\n self.vel_filter = 0.3 # EMA filter coefficient applied to self.state_filtered_velocity\n self.vel_decay = 0.95 # velocity decay at every vision cycle (neutralized if velocity is updated)\n\n\n # State variables: these are computed when this robot is visible and when the original robot is able to self-locate\n self.state_fallen = False # true if the robot is lying down (updated when head is visible)\n self.state_last_update = 0 # World.time_local_ms when the state was last updated\n self.state_horizontal_dist = 0 # horizontal head distance if head is visible, otherwise, average horizontal distance of visible body parts (the distance is updated by vision or radio when state_abs_pos gets a new value, but also when the other player is not visible, by assuming its last position)\n self.state_abs_pos = None # 3D head position if head is visible, otherwise, 2D average position of visible body parts, or, 2D radio head position\n self.state_orientation = 0 # orientation based on pair of lower arms or feet, or average of both (WARNING: may be older than state_last_update) \n self.state_ground_area = None # (pt_2d,radius) projection of player area on ground (circle), not precise if farther than 3m (for performance), useful for obstacle avoidance when it falls\n self.state_body_parts_abs_pos = dict() # 3D absolute position of each body part\n self.state_filtered_velocity = np.zeros(3) # 3D filtered velocity (m/s) (if the head is not visible, the 2D part is updated and v.z decays)" }, { "identifier": "Robot", "path": "world/Robot.py", "snippet": "class Robot():\n STEPTIME = 0.02 # Fixed step time\n VISUALSTEP = 0.04 # Fixed visual step time\n SQ_STEPTIME = STEPTIME * STEPTIME\n GRAVITY = np.array([0,0,-9.81])\n IMU_DECAY = 0.996 #IMU's velocity decay\n \n #------------------ constants to force symmetry in joints/effectors\n\n MAP_PERCEPTOR_TO_INDEX = {\"hj1\":0, \"hj2\":1, \"llj1\":2, \"rlj1\":3,\n \"llj2\":4, \"rlj2\":5, \"llj3\":6, \"rlj3\":7,\n \"llj4\":8, \"rlj4\":9, \"llj5\":10,\"rlj5\":11,\n \"llj6\":12,\"rlj6\":13,\"laj1\":14,\"raj1\":15,\n \"laj2\":16,\"raj2\":17,\"laj3\":18,\"raj3\":19,\n \"laj4\":20,\"raj4\":21,\"llj7\":22,\"rlj7\":23 }\n\n # Fix symmetry issues 1a/4 (identification) \n FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'}\n FIX_INDICES_LIST = [5,13,17,18,20]\n\n # Recommended height for unofficial beam (near ground)\n BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4]\n\n\n def __init__(self, unum:int, robot_type:int) -> None:\n robot_xml = \"nao\"+str(robot_type)+\".xml\" # Typical NAO file name\n self.type = robot_type\n 
self.beam_height = Robot.BEAM_HEIGHTS[robot_type]\n self.no_of_joints = 24 if robot_type == 4 else 22 \n\n #Fix symmetry issues 1b/4 (identification) \n self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints)\n self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1\n\n self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects'\n self.unum = unum # Robot's uniform number\n self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s)\n self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2)\n self.frp = dict() # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {\"lf\":(px,py,pz,fx,fy,fz)}\n self.feet_toes_last_touch = {\"lf\":0,\"rf\":0,\"lf1\":0,\"rf1\":0} # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" World.time_local_ms when foot/toe last touched any surface\n self.feet_toes_are_touching = {\"lf\":False,\"rf\":False,\"lf1\":False,\"rf1\":False} # foot \"lf\"/\"rf\", toe \"lf1\"/\"rf1\" True if touching in last received server message\n self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies\n self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m)\n\n # Joint variables are optimized for performance / array operations\n self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg)\n self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s)\n self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)\n self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg)\n self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info)\n self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix\n\n # Localization variables relative to head\n self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field\n self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head\n self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field\n self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head\n self.loc_head_position = np.zeros(3) # Absolute head position (m)\n self.loc_head_position_history = deque(maxlen=40)# Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position)\n self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy)\n self.loc_head_orientation = 0 # Head orientation (deg)\n self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible\n self.loc_last_update = 0 # World.time_local_ms when the localization was last updated\n self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio\n self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc)\n self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position)\n\n # Localization variables relative to torso\n 
self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field \n self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field\n self.loc_torso_roll = 0 # Torso roll (deg)\n self.loc_torso_pitch = 0 # Torso pitch (deg) \n self.loc_torso_orientation = 0 # Torso orientation (deg)\n self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis)\n self.loc_torso_position = np.zeros(3) # Absolute torso position (m)\n self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s)\n self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2)\n\n # Other localization variables\n self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m)\n self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg)\n self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m)\n self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s)\n\n # Localization special variables\n '''\n self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs.\n There are situations in which the rotation and translation cannot be computed, \n but the z-coordinate can still be found through vision, in which case:\n self.loc_is_up_to_date is False\n self.loc_head_z_is_up_to_date is True\n It should be used in applications which rely on z as an independent coordinate, such\n as detecting if the robot has fallen, or as an observation for machine learning.\n It should NEVER be used for 3D transformations.\n '''\n self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m)\n self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible\n self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed\n self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s)\n\n # Localization + Gyroscope\n # These variables are reliable. 
The gyroscope is used to update the rotation when waiting for the next visual cycle\n self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro)\n self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro)\n self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro)\n self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro)\n self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro)\n self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information \n\n # Localization + Gyroscope + Accelerometer\n # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to \n # wrong acceleration -> wrong velocity -> wrong position\n self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc)\n self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc)\n self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc)\n self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc)\n self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc)\n self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc)\n self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc)\n\n\n #Using explicit variables to enable IDE suggestions\n self.J_HEAD_YAW = 0\n self.J_HEAD_PITCH = 1\n self.J_LLEG_YAW_PITCH = 2\n self.J_RLEG_YAW_PITCH = 3\n self.J_LLEG_ROLL = 4\n self.J_RLEG_ROLL = 5\n self.J_LLEG_PITCH = 6\n self.J_RLEG_PITCH = 7\n self.J_LKNEE = 8\n self.J_RKNEE = 9\n self.J_LFOOT_PITCH = 10\n self.J_RFOOT_PITCH = 11\n self.J_LFOOT_ROLL = 12\n self.J_RFOOT_ROLL = 13\n self.J_LARM_PITCH = 14\n self.J_RARM_PITCH = 15\n self.J_LARM_ROLL = 16\n self.J_RARM_ROLL = 17\n self.J_LELBOW_YAW = 18\n self.J_RELBOW_YAW = 19\n self.J_LELBOW_ROLL = 20\n self.J_RELBOW_ROLL = 21\n self.J_LTOE_PITCH = 22\n self.J_RTOE_PITCH = 23\n\n\n #------------------ parse robot xml\n\n dir = M.get_active_directory(\"/world/commons/robots/\")\n robot_xml_root = xmlp.parse(dir + robot_xml).getroot()\n\n joint_no = 0\n for child in robot_xml_root:\n if child.tag == \"bodypart\":\n self.body_parts[child.attrib['name']] = Body_Part(child.attrib['mass'])\n elif child.tag == \"joint\":\n self.joints_info[joint_no] = Joint_Info(child)\n self.joints_position[joint_no] = 0.0\n ji = self.joints_info[joint_no]\n\n #save joint if body part is 1st anchor (to simplify model traversal in a single direction)\n self.body_parts[ji.anchor0_part].joints.append(Robot.MAP_PERCEPTOR_TO_INDEX[ji.perceptor]) \n\n joint_no += 1\n if joint_no == self.no_of_joints: break #ignore extra joints\n\n else:\n raise NotImplementedError\n\n assert joint_no == self.no_of_joints, \"The Robot XML and the robot type don't 
match!\"\n\n\n def get_head_abs_vel(self, history_steps:int):\n '''\n Get robot's head absolute velocity (m/s)\n\n Parameters\n ----------\n history_steps : int\n number of history steps to consider [1,40]\n\n Examples\n --------\n get_head_abs_vel(1) is equivalent to (current abs pos - last abs pos) / 0.04\n get_head_abs_vel(2) is equivalent to (current abs pos - abs pos 0.08s ago) / 0.08\n get_head_abs_vel(3) is equivalent to (current abs pos - abs pos 0.12s ago) / 0.12\n '''\n assert 1 <= history_steps <= 40, \"Argument 'history_steps' must be in range [1,40]\"\n\n if len(self.loc_head_position_history) == 0:\n return np.zeros(3)\n\n h_step = min(history_steps, len(self.loc_head_position_history))\n t = h_step * Robot.VISUALSTEP\n\n return (self.loc_head_position - self.loc_head_position_history[h_step-1]) / t\n \n\n def _initialize_kinematics(self):\n\n #starting with head\n parts={\"head\"}\n sequential_body_parts = [\"head\"]\n\n while len(parts) > 0:\n part = parts.pop()\n\n for j in self.body_parts[part].joints:\n\n p = self.joints_info[j].anchor1_part\n\n if len(self.body_parts[p].joints) > 0: #add body part if it is the 1st anchor of some joint\n parts.add(p)\n sequential_body_parts.append(p)\n\n self.fwd_kinematics_list = [(self.body_parts[part],j, self.body_parts[self.joints_info[j].anchor1_part] ) \n for part in sequential_body_parts for j in self.body_parts[part].joints]\n\n #Fix symmetry issues 4/4 (kinematics)\n for i in Robot.FIX_INDICES_LIST:\n self.joints_info[i].axes *= -1\n aux = self.joints_info[i].min\n self.joints_info[i].min = -self.joints_info[i].max\n self.joints_info[i].max = -aux\n\n\n def update_localization(self, localization_raw, time_local_ms): \n\n # parse raw data\n loc = localization_raw.astype(float) #32bits to 64bits for consistency\n self.loc_is_up_to_date = bool(loc[32])\n self.loc_head_z_is_up_to_date = bool(loc[34])\n\n if self.loc_head_z_is_up_to_date:\n time_diff = (time_local_ms - self.loc_head_z_last_update) / 1000 \n self.loc_head_z_vel = (loc[33] - self.loc_head_z) / time_diff\n self.loc_head_z = loc[33]\n self.loc_head_z_last_update = time_local_ms\n\n # Save last position to history at every vision cycle (even if not up to date) (update_localization is only called at vision cycles)\n self.loc_head_position_history.appendleft(np.copy(self.loc_head_position))\n\n if self.loc_is_up_to_date:\n time_diff = (time_local_ms - self.loc_last_update) / 1000\n self.loc_last_update = time_local_ms\n self.loc_head_to_field_transform.m[:] = loc[0:16].reshape((4,4))\n self.loc_field_to_head_transform.m[:] = loc[16:32].reshape((4,4))\n \n # extract data (related to the robot's head)\n self.loc_rotation_head_to_field = self.loc_head_to_field_transform.get_rotation()\n self.loc_rotation_field_to_head = self.loc_field_to_head_transform.get_rotation()\n p = self.loc_head_to_field_transform.get_translation()\n self.loc_head_velocity = (p - self.loc_head_position) / time_diff\n self.loc_head_position = p\n self.loc_head_position_last_update = time_local_ms\n self.loc_head_orientation = self.loc_head_to_field_transform.get_yaw_deg()\n self.radio_fallen_state = False\n\n # extract data (related to the center of mass)\n p = self.loc_head_to_field_transform(self.rel_cart_CoM_position)\n self.loc_CoM_velocity = (p - self.loc_CoM_position) / time_diff\n self.loc_CoM_position = p\n\n # extract data (related to the robot's torso)\n t = self.get_body_part_to_field_transform('torso')\n self.loc_torso_to_field_transform = t\n self.loc_torso_to_field_rotation = 
t.get_rotation()\n self.loc_torso_orientation = t.get_yaw_deg()\n self.loc_torso_pitch = t.get_pitch_deg()\n self.loc_torso_roll = t.get_roll_deg()\n self.loc_torso_inclination = t.get_inclination_deg()\n p = t.get_translation()\n self.loc_torso_velocity = (p - self.loc_torso_position) / time_diff\n self.loc_torso_position = p\n self.loc_torso_acceleration = self.loc_torso_to_field_rotation.multiply(self.acc) + Robot.GRAVITY\n\n\n def head_to_body_part_transform(self, body_part_name, coords, is_batch=False):\n '''\n If coord is a vector or list of vectors:\n Convert cartesian coordinates that are relative to head to coordinates that are relative to a body part \n\n If coord is a Matrix_4x4 or a list of Matrix_4x4:\n Convert pose that is relative to head to a pose that is relative to a body part \n \n Parameters\n ----------\n body_part_name : `str`\n name of body part (given by the robot's XML)\n coords : array_like\n One 3D position or list of 3D positions\n is_batch : `bool`\n Indicates if coords is a batch of 3D positions\n\n Returns\n -------\n coord : `list` or ndarray\n A numpy array is returned if is_batch is False, otherwise, a list of arrays is returned\n '''\n head_to_bp_transform : Matrix_4x4 = self.body_parts[body_part_name].transform.invert()\n \n if is_batch:\n return [head_to_bp_transform(c) for c in coords]\n else:\n return head_to_bp_transform(coords)\n\n\n\n def get_body_part_to_field_transform(self, body_part_name) -> Matrix_4x4:\n '''\n Computes the transformation matrix from body part to field, from which we can extract its absolute position and rotation.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.loc_head_to_field_transform.multiply(self.body_parts[body_part_name].transform)\n\n def get_body_part_abs_position(self, body_part_name) -> np.ndarray:\n '''\n Computes the absolute position of a body part considering the localization data and forward kinematics.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.get_body_part_to_field_transform(body_part_name).get_translation()\n\n def get_joint_to_field_transform(self, joint_index) -> Matrix_4x4:\n '''\n Computes the transformation matrix from joint to field, from which we can extract its absolute position and rotation.\n For best results, use this method when self.loc_is_up_to_date is True. Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.loc_head_to_field_transform.multiply(self.joints_transform[joint_index])\n\n def get_joint_abs_position(self, joint_index) -> np.ndarray:\n '''\n Computes the absolute position of a joint considering the localization data and forward kinematics.\n For best results, use this method when self.loc_is_up_to_date is True. 
Otherwise, the forward kinematics\n will not be synced with the localization data and strange results may occur.\n '''\n return self.get_joint_to_field_transform(joint_index).get_translation()\n\n def update_pose(self):\n\n if self.fwd_kinematics_list is None:\n self._initialize_kinematics()\n\n for body_part, j, child_body_part in self.fwd_kinematics_list:\n ji = self.joints_info[j]\n self.joints_transform[j].m[:] = body_part.transform.m\n self.joints_transform[j].translate(ji.anchor0_axes, True)\n child_body_part.transform.m[:] = self.joints_transform[j].m\n child_body_part.transform.rotate_deg(ji.axes, self.joints_position[j], True)\n child_body_part.transform.translate(ji.anchor1_axes_neg, True)\n\n self.rel_cart_CoM_position = np.average([b.transform.get_translation() for b in self.body_parts.values()], 0,\n [b.mass for b in self.body_parts.values()])\n\n\n def update_imu(self, time_local_ms):\n\n # update IMU\n if self.loc_is_up_to_date:\n self.imu_torso_roll = self.loc_torso_roll\n self.imu_torso_pitch = self.loc_torso_pitch \n self.imu_torso_orientation = self.loc_torso_orientation\n self.imu_torso_inclination = self.loc_torso_inclination\n self.imu_torso_to_field_rotation.m[:] = self.loc_torso_to_field_rotation.m\n self.imu_weak_torso_to_field_transform.m[:] = self.loc_torso_to_field_transform.m\n self.imu_weak_head_to_field_transform.m[:] = self.loc_head_to_field_transform.m\n self.imu_weak_field_to_head_transform.m[:] = self.loc_field_to_head_transform.m\n self.imu_weak_torso_position[:] = self.loc_torso_position\n self.imu_weak_torso_velocity[:] = self.loc_torso_velocity\n self.imu_weak_torso_acceleration[:] = self.loc_torso_acceleration\n self.imu_weak_torso_next_position = self.loc_torso_position + self.loc_torso_velocity * Robot.STEPTIME + self.loc_torso_acceleration * (0.5 * Robot.SQ_STEPTIME)\n self.imu_weak_torso_next_velocity = self.loc_torso_velocity + self.loc_torso_acceleration * Robot.STEPTIME\n self.imu_weak_CoM_position[:] = self.loc_CoM_position\n self.imu_weak_CoM_velocity[:] = self.loc_CoM_velocity\n self.imu_last_visual_update = time_local_ms\n else:\n g = self.gyro / 50 # convert degrees per second to degrees per step\n\n self.imu_torso_to_field_rotation.multiply( Matrix_3x3.from_rotation_deg(g), in_place=True, reverse_order=True)\n\n self.imu_torso_orientation = self.imu_torso_to_field_rotation.get_yaw_deg()\n self.imu_torso_pitch = self.imu_torso_to_field_rotation.get_pitch_deg()\n self.imu_torso_roll = self.imu_torso_to_field_rotation.get_roll_deg()\n\n self.imu_torso_inclination = atan(sqrt(tan(self.imu_torso_roll/180*pi)**2+tan(self.imu_torso_pitch/180*pi)**2))*180/pi\n\n # Update position and velocity until 0.2 seconds has passed since last visual update\n if time_local_ms < self.imu_last_visual_update + 200:\n self.imu_weak_torso_position[:] = self.imu_weak_torso_next_position\n if self.imu_weak_torso_position[2] < 0: self.imu_weak_torso_position[2] = 0 # limit z coordinate to positive values\n self.imu_weak_torso_velocity[:] = self.imu_weak_torso_next_velocity * Robot.IMU_DECAY # stability tradeoff\n else:\n self.imu_weak_torso_velocity *= 0.97 # without visual updates for 0.2s, the position is locked, and the velocity decays to zero\n\n # convert proper acceleration to coordinate acceleration and fix rounding bias\n self.imu_weak_torso_acceleration = self.imu_torso_to_field_rotation.multiply(self.acc) + Robot.GRAVITY\n self.imu_weak_torso_to_field_transform = 
Matrix_4x4.from_3x3_and_translation(self.imu_torso_to_field_rotation,self.imu_weak_torso_position)\n self.imu_weak_head_to_field_transform = self.imu_weak_torso_to_field_transform.multiply(self.body_parts[\"torso\"].transform.invert())\n self.imu_weak_field_to_head_transform = self.imu_weak_head_to_field_transform.invert()\n p = self.imu_weak_head_to_field_transform(self.rel_cart_CoM_position)\n self.imu_weak_CoM_velocity = (p-self.imu_weak_CoM_position)/Robot.STEPTIME\n self.imu_weak_CoM_position = p\n\n # Next Position = x0 + v0*t + 0.5*a*t^2, Next velocity = v0 + a*t\n self.imu_weak_torso_next_position = self.imu_weak_torso_position + self.imu_weak_torso_velocity * Robot.STEPTIME + self.imu_weak_torso_acceleration * (0.5 * Robot.SQ_STEPTIME)\n self.imu_weak_torso_next_velocity = self.imu_weak_torso_velocity + self.imu_weak_torso_acceleration * Robot.STEPTIME\n\n\n\n def set_joints_target_position_direct(self,indices,values:np.ndarray,harmonize=True,max_speed=7.03,tolerance=0.012,limit_joints=True) -> int:\n '''\n Computes the speed of a list of joints, taking as argument the target position\n\n Parameters\n ----------\n indices : `int`/`list`/`slice`/numpy array\n joint indices\n values : numpy array \n target position for each listed joint index\n harmonize : `bool`\n if True, all joints reach target at same time\n max_speed : `float`\n max. speed for all joints in deg/step\n Most joints have a maximum speed of 351.77 deg/s according to rcssserver3d/data/rsg/agent/nao/hingejoint.rsg\n That translates as 7.0354 deg/step or 6.1395 rad/s\n tolerance : `float`\n angle error tolerance (in degrees) to return that target was reached (returns -1)\n limit_joints : `bool`\n limit values to the joints' range of motion\n\n Returns\n -------\n remaining_steps : `int`\n predicted number of remaining steps or -1 if target was already reached\n\n Examples\n -------\n (let p[tx] be the joint position at t=x)\n\n Example for return value: moving joint[0] from 0deg to 10deg\n pos[t0]: 0, speed[t0]: 7deg/step, ret=2 # target will predictedly be reached in 2 steps\n pos[t1]: 7, speed[t1]: 3deg/step, ret=1 # target will predictedly be reached in 1 step (send final action)\n pos[t2]: 10, speed[t2]: 0deg/step, ret=0 # target was predictedly already reached \n pos[t3]: 10, speed[t3]: 0deg/step, ret=-1 # (best case scenario) server reported with delay, that target was reached (see tolerance)\n pos[t?]: 10, speed[t?]: 0deg/step, ret=-1 # if there is friction, it may take some additional steps \n\n If everything worked as predicted we could stop calling this function when ret==1\n If we need precision, it is recommended to wait for ret==-1\n\n Example 1:\n set_joints_target_position_direct(range(2,4),np.array([10.0,5.0]),harmonize=True) \n Joint[2] p[t0]: 0 target pos: 10 -> p[t1]=5, p[t2]=10\n Joint[3] p[t0]: 0 target pos: 5 -> p[t1]=2.5, p[t2]=5\n\n Example 2:\n set_joints_target_position_direct([2,3],np.array([10.0,5.0]),harmonize=False) \n Joint[2] p[t0]: 0 target pos: 10 -> p[t1]=7, p[t2]=10\n Joint[3] p[t0]: 0 target pos: 5 -> p[t1]=5, p[t2]=5 \n '''\n\n assert type(values) == np.ndarray, \"'values' argument must be a numpy array\"\n np.nan_to_num(values, copy=False) # Replace NaN with zero and infinity with large finite numbers\n\n # limit range of joints\n if limit_joints: \n if type(indices) == list or type(indices) == np.ndarray:\n for i in range(len(indices)):\n values[i] = np.clip(values[i], self.joints_info[indices[i]].min, self.joints_info[indices[i]].max)\n elif type(indices) == slice:\n info = 
self.joints_info[indices]\n for i in range(len(info)):\n values[i] = np.clip(values[i], info[i].min, info[i].max)\n else: # int\n values[0] = np.clip(values[0], self.joints_info[indices].min, self.joints_info[indices].max)\n\n #predicted_diff: predicted difference between reported position and actual position\n\n predicted_diff = self.joints_target_last_speed[indices] * 1.1459156 #rad/s to deg/step\n predicted_diff = np.asarray(predicted_diff)\n np.clip(predicted_diff,-7.03,7.03,out=predicted_diff) #saturate predicted movement in-place\n\n #reported_dist: difference between reported position and target position\n\n reported_dist = values - self.joints_position[indices]\n if np.all((np.abs(reported_dist) < tolerance)) and np.all((np.abs(predicted_diff) < tolerance)):\n self.joints_target_speed[indices] = 0\n return -1\n \n deg_per_step = reported_dist - predicted_diff\n\n relative_max = np.max( np.abs(deg_per_step) ) / max_speed\n remaining_steps = np.ceil( relative_max )\n\n if remaining_steps == 0:\n self.joints_target_speed[indices] = 0\n return 0\n\n if harmonize: \n deg_per_step /= remaining_steps\n else:\n np.clip(deg_per_step,-max_speed,max_speed,out=deg_per_step) #limit maximum speed\n\n self.joints_target_speed[indices] = deg_per_step * 0.87266463 #convert to rad/s\n\n return remaining_steps\n\n\n\n def get_command(self) -> bytes:\n '''\n Builds commands string from self.joints_target_speed\n '''\n j_speed = self.joints_target_speed * self.FIX_EFFECTOR_MASK #Fix symmetry issues 3/4 (effectors)\n cmd = \"\".join(f\"({self.joints_info[i].effector} {j_speed[i]:.5f})\" for i in range(self.no_of_joints)).encode('utf-8')\n\n self.joints_target_last_speed = self.joints_target_speed #1. both point to the same array\n self.joints_target_speed = np.zeros_like(self.joints_target_speed) #2. create new array for joints_target_speed\n return cmd" } ]
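The docstring above already works through the speed computation step by step; for clarity, here is a compact, self-contained sketch of the same arithmetic in plain NumPy. This is a sketch only: the Robot state is replaced by plain arrays, and the unit-conversion constants are copied from the snippet (0.02 s per step).

import numpy as np

DEG_PER_STEP_PER_RAD_S = 1.1459156   # rad/s -> deg/step at 0.02 s per step
RAD_S_PER_DEG_PER_STEP = 0.87266463  # deg/step -> rad/s at 0.02 s per step

def harmonized_speeds(current_pos, last_speed_rad_s, target_pos,
                      max_speed=7.03, tolerance=0.012):
    # Degrees the joints will still move before the next position report arrives
    predicted_diff = np.clip(last_speed_rad_s * DEG_PER_STEP_PER_RAD_S, -7.03, 7.03)
    # Degrees between the last reported position and the target
    reported_dist = target_pos - current_pos
    if np.all(np.abs(reported_dist) < tolerance) and np.all(np.abs(predicted_diff) < tolerance):
        return np.zeros_like(target_pos), -1   # target already reached
    deg_per_step = reported_dist - predicted_diff
    remaining = int(np.ceil(np.max(np.abs(deg_per_step)) / max_speed))
    if remaining == 0:
        return np.zeros_like(target_pos), 0
    deg_per_step = deg_per_step / remaining    # harmonize: all joints arrive together
    return deg_per_step * RAD_S_PER_DEG_PER_STEP, remaining

# Example 1 from the docstring: two joints at 0 deg moving to 10 and 5 deg.
# remaining == 2, and both joints are predicted to arrive on the same step.
speeds, remaining = harmonized_speeds(np.zeros(2), np.zeros(2), np.array([10.0, 5.0]))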
from collections import deque
from cpp.ball_predictor import ball_predictor
from cpp.localization import localization
from logs.Logger import Logger
from math import atan2, pi
from math_ops.Matrix_4x4 import Matrix_4x4
from world.commons.Draw import Draw
from world.commons.Other_Robot import Other_Robot
from world.Robot import Robot
import numpy as np
19,012
class World():
    STEPTIME = 0.02    # Fixed step time
    STEPTIME_MS = 20   # Fixed step time in milliseconds
    VISUALSTEP = 0.04  # Fixed visual step time
    VISUALSTEP_MS = 40 # Fixed visual step time in milliseconds

    # play modes in our favor
    M_OUR_KICKOFF = 0
    M_OUR_KICK_IN = 1
    M_OUR_CORNER_KICK = 2
    M_OUR_GOAL_KICK = 3
    M_OUR_FREE_KICK = 4
    M_OUR_PASS = 5
    M_OUR_DIR_FREE_KICK = 6
    M_OUR_GOAL = 7
    M_OUR_OFFSIDE = 8

    # play modes in their favor
    M_THEIR_KICKOFF = 9
    M_THEIR_KICK_IN = 10
    M_THEIR_CORNER_KICK = 11
    M_THEIR_GOAL_KICK = 12
    M_THEIR_FREE_KICK = 13
    M_THEIR_PASS = 14
    M_THEIR_DIR_FREE_KICK = 15
    M_THEIR_GOAL = 16
    M_THEIR_OFFSIDE = 17

    # neutral play modes
    M_BEFORE_KICKOFF = 18
    M_GAME_OVER = 19
    M_PLAY_ON = 20

    # play mode groups
    MG_OUR_KICK = 0
    MG_THEIR_KICK = 1
    MG_ACTIVE_BEAM = 2
    MG_PASSIVE_BEAM = 3
    MG_OTHER = 4 # play on, game over

    FLAGS_CORNERS_POS = ((-15,-10,0), (-15,+10,0), (+15,-10,0), (+15,+10,0))
    FLAGS_POSTS_POS = ((-15,-1.05,0.8),(-15,+1.05,0.8),(+15,-1.05,0.8),(+15,+1.05,0.8))

    def __init__(self, robot_type:int, team_name:str, unum:int, apply_play_mode_correction:bool, enable_draw:bool, logger:Logger, host:str) -> None:

        self.team_name = team_name # Name of our team
        self.team_name_opponent : str = None # Name of opponent team
        self.apply_play_mode_correction = apply_play_mode_correction # True to adjust ball position according to play mode
        self.step = 0 # Total number of received simulation steps (always in sync with self.time_local_ms)
        self.time_server = 0.0 # Time, in seconds, as indicated by the server (this time is NOT reliable, use only for synchronization between agents)
        self.time_local_ms = 0 # Reliable simulation time in milliseconds, use this when possible (it is incremented 20ms for every TCP message)
        self.time_game = 0.0 # Game time, in seconds, as indicated by the server
        self.goals_scored = 0 # Goals scored by our team
        self.goals_conceded = 0 # Goals conceded by our team
        self.team_side_is_left : bool = None # True if our team plays on the left side (this value is later changed by the world parser)
        self.play_mode = None # Play mode of the soccer game, provided by the server
        self.play_mode_group = None # Certain play modes share characteristics, so it makes sense to group them
        self.flags_corners : dict = None # corner flags, key=(x,y,z), always assume we play on the left side
        self.flags_posts : dict = None # goal posts, key=(x,y,z), always assume we play on the left side
        self.ball_rel_head_sph_pos = np.zeros(3) # Ball position relative to head (spherical coordinates) (m, deg, deg)
        self.ball_rel_head_cart_pos = np.zeros(3) # Ball position relative to head (cartesian coordinates) (m)
        self.ball_rel_torso_cart_pos = np.zeros(3) # Ball position relative to torso (cartesian coordinates) (m)
        self.ball_rel_torso_cart_pos_history = deque(maxlen=20) # Ball position relative to torso history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.ball_abs_pos = np.zeros(3) # Ball absolute position (up to date if self.ball_is_visible and self.robot.loc_is_up_to_date) (m)
        self.ball_abs_pos_history = deque(maxlen=20) # Ball absolute position history (queue with up to 20 old positions at intervals of 0.04s, where index 0 is the previous position)
        self.ball_abs_pos_last_update = 0 # World.time_local_ms when self.ball_abs_pos was last updated by vision or radio
        self.ball_abs_vel = np.zeros(3) # Ball velocity vector based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead get_ball_abs_vel)
        self.ball_abs_speed = 0 # Ball scalar speed based on the last 2 known values of self.ball_abs_pos (m/s) (Warning: noisy if ball is distant, use instead ||get_ball_abs_vel||)
        self.ball_is_visible = False # True if the last server message contained vision information related to the ball
        self.is_ball_abs_pos_from_vision = False # True if ball_abs_pos originated from vision, False if it originated from radio
        self.ball_last_seen = 0 # World.time_local_ms when ball was last seen (note: may be different from self.ball_abs_pos_last_update)
        self.ball_cheat_abs_pos = np.zeros(3) # Absolute ball position provided by the server as cheat (m)
        self.ball_cheat_abs_vel = np.zeros(3) # Absolute velocity vector based on the last 2 values of self.ball_cheat_abs_pos (m/s)
        self.ball_2d_pred_pos = np.zeros((1,2)) # prediction of current and future 2D ball positions*
        self.ball_2d_pred_vel = np.zeros((1,2)) # prediction of current and future 2D ball velocities*
        self.ball_2d_pred_spd = np.zeros(1) # prediction of current and future 2D ball linear speeds*
        # *at intervals of 0.02 s until ball comes to a stop or gets out of bounds (according to prediction)
        self.lines = np.zeros((30,6)) # Position of visible lines, relative to head, start_pos+end_pos (spherical coordinates) (m, deg, deg, m, deg, deg)
        self.line_count = 0 # Number of visible lines
        self.vision_last_update = 0 # World.time_local_ms when last vision update was received
        self.vision_is_up_to_date = False # True if the last server message contained vision information
        self.teammates = [Other_Robot(i, True ) for i in range(1,12)] # List of teammates, ordered by unum
        self.opponents = [Other_Robot(i, False) for i in range(1,12)] # List of opponents, ordered by unum
        self.teammates[unum-1].is_self = True # This teammate is self
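The constructor comments describe ball_abs_vel as a finite difference of the last two known ball positions taken at visual-step intervals. A minimal sketch of that estimate follows; the position values are made up for illustration, and the real code prefers the smoothed get_ball_abs_vel for distant (noisy) observations.

import numpy as np
from collections import deque

VISUALSTEP = 0.04  # seconds between vision updates, matching World.VISUALSTEP

ball_abs_pos_history = deque(maxlen=20)
ball_abs_pos_history.appendleft(np.array([0.00, 0.00, 0.04]))  # position at t-0.04 (made up)
ball_abs_pos = np.array([0.12, 0.04, 0.04])                    # newest position (made up)

# Finite-difference estimate over one visual step
ball_abs_vel = (ball_abs_pos - ball_abs_pos_history[0]) / VISUALSTEP  # [3., 1., 0.] m/s
ball_abs_speed = np.linalg.norm(ball_abs_vel)                         # ~3.16 m/s here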
self.draw = Draw(enable_draw, unum, host, 32769) # Draw object for current player
2
2023-12-16 23:40:23+00:00
24k
Sam-Izdat/tinycio
src/tinycio/lut.py
[ { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python\n .. code-block:: python\n \n cs_in = ColorSpace.Variant.SRGB_LIN\n cs_out = ColorSpace.Variant.OKLAB\n oklab_image = ColorSpace.convert(srgb_image, source=cs_in, destination=cs_out)\n \"\"\"\n class Variant(IntEnum):\n \"\"\"\n Color space enum. For a list of available options, see :ref:`ref_color_spaces`.\n \"\"\"\n UNKNOWN = 1<<0 \n NONCOLOR = 1<<1 \n CIE_XYZ = 1<<2 \n CIE_XYY = 1<<3 \n SRGB = 1<<4 \n SRGB_LIN = 1<<5 \n REC709 = 1<<6 \n REC2020 = 1<<7 \n REC2020_LIN = 1<<8 \n DCI_P3 = 1<<9 \n DCI_P3_LIN = 1<<10 \n DISPLAY_P3 = 1<<11 \n ACESCG = 1<<12 \n ACESCC = 1<<13 \n ACESCCT = 1<<14 \n ACES2065_1 = 1<<15 \n LMS = 1<<16 \n OKLAB = 1<<17 \n CIELAB = 1<<18 \n CIELUV = 1<<19 \n HSV = 1<<20 \n HSL = 1<<21 \n OKHSV = 1<<22\n OKHSL = 1<<23\n\n SCENE_LINEAR = SRGB_LIN | REC2020_LIN | DCI_P3_LIN | ACESCG | ACES2065_1 | CIE_XYZ\n PERCEPTUAL = OKLAB | CIELAB | CIELUV | OKHSL | OKHSV\n CYLINDRICAL = HSL | HSV | OKHSL | OKHSV\n\n GAMUT_SRGB = SRGB | SRGB_LIN | REC709 | HSL | HSV\n GAMUT_AP0 = ACES2065_1\n GAMUT_AP1 = ACESCG | ACESCC | ACESCCT\n GAMUT_REC2020 = REC2020 | REC2020_LIN\n GAMUT_DCI_P3 = DCI_P3 | DCI_P3_LIN\n GAMUT_DISPLAY_P3= DISPLAY_P3\n GAMUT_OKLAB = OKLAB | OKHSL | OKHSV\n GAMUT_CIE_XYZ = CIE_XYZ | CIE_XYY\n GAMUT_CIELAB = CIELAB\n GAMUT_CIELUV = CIELUV\n GAMUT_OTHER = LMS | UNKNOWN | NONCOLOR\n\n WP_D65 = SRGB | SRGB_LIN | REC709 | DISPLAY_P3 | REC2020 | REC2020_LIN | CIE_XYZ | CIE_XYY\n WP_CCT_6300 = DCI_P3 | DCI_P3_LIN\n WP_CCT_6000 = ACESCG | ACESCC | ACESCCT | ACES2065_1\n\n MODEL_RGB = SRGB | SRGB_LIN | REC709 | REC2020 | REC2020_LIN | DCI_P3 | DCI_P3_LIN | DISPLAY_P3 | \\\n ACESCG | ACESCC | ACESCCT | ACES2065_1\n MODEL_CIE = CIE_XYZ | CIE_XYY | CIELAB | CIELUV\n MODEL_CAM = 0\n MODEL_YUV = 0\n MODEL_OTHER = LMS | HSL | HSV | OKLAB # is OKLAB CAM-based?\n \n NEGATIVE = OKLAB | CIELAB | CIELUV | GAMUT_AP0\n NON_NEGATIVE = ~NEGATIVE\n\n DISABLED = CIELUV\n UNSUPPORTED = OKHSV | OKHSL # disabled doesn't go here - CS must have alternate path\n SUPPORTED = ~UNSUPPORTED \n\n # FIXME: LUV doesn't quite match expected values, needs further testing\n\n mat_xyz_to_srgb = [\n [3.24096994190452134, -1.53738317757009346, -0.498610760293003284],\n [-0.969243636280879826, 1.87596750150772067, 0.0415550574071756125],\n [0.0556300796969936084, -0.203976958888976564, 1.05697151424287856]]\n\n mat_srgb_to_xyz = [\n [0.412390799265959481, 0.357584339383877964, 0.180480788401834288],\n [0.212639005871510358, 0.715168678767755927, 0.072192315360733715],\n [0.0193308187155918507, 0.119194779794625988, 0.950532152249660581]]\n\n mat_srgb_to_acescg = [\n [ 0.6130974024, 0.3395231462, 0.04737945141],\n [ 0.07019372247, 0.916353879, 0.01345239847],\n [ 0.02061559288, 0.1095697729, 0.8698146341]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_acescg_to_srgb = [\n [ 1.705050993, -0.6217921206,-0.083258872],\n [-0.1302564175, 1.140804737, -0.01054831907],\n [-0.02400335681,-0.1289689761, 1.152972333]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_srgb_to_aces2065_1 = [\n [ 0.439632982, 0.382988698, 0.17737832],\n [ 0.0897764431, 0.813439429, 0.0967841284],\n [ 0.0175411704, 0.111546553, 0.870912277]]\n\n mat_aces2065_1_to_srgb = [\n [ 2.52168619, -1.13413099, -0.387555198],\n 
[-0.276479914, 1.37271909, -0.0962391736],\n [-0.015378065, -0.152975336, 1.1683534]]\n\n mat_srgb_to_displayp3 = [\n [ 0.822461969, 0.177538031, 1.15772692e-10],\n [ 0.0331941989, 0.966805801, 1.95085037e-11],\n [ 0.0170826307, 0.0723974405, 0.910519929]]\n\n mat_displayp3_to_srgb = [\n [ 1.22494018, -0.224940176, -4.77534979e-11],\n [-0.0420569547, 1.04205695, 3.37864801e-11],\n [-0.0196375546,-0.0786360454, 1.0982736]] \n\n # NOTE: No chromatic adaptation\n mat_srgb_to_dcip3 = [\n [0.868579739716132409, 0.128919138460847047, 0.00250112182302054368],\n [0.0345404102543194426, 0.961811386361919975, 0.0036482033837605824],\n [0.0167714290414502718, 0.0710399977868858352, 0.912188573171663893]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_srgb = [\n [ 1.15751640619975871, -0.154962378073857756, -0.00255402812590095854],\n [-0.0415000715306859699, 1.04556792307969925, -0.00406785154901328463],\n [-0.0180500389562539583,-0.0785782726530290654, 1.09662831160928302]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_xyz = [\n [ 0.445169815564552417, 0.277134409206777664, 0.172282669815564564],\n [ 0.209491677912730539, 0.721595254161043636, 0.0689130679262258258],\n [-3.63410131696985616e-17, 0.0470605600539811521, 0.907355394361973415]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_dcip3 = [\n [2.7253940304917328, -1.01800300622718496, -0.440163195190036463],\n [-0.795168025808764195, 1.689732054843624, 0.0226471906084774533],\n [0.0412418913957000325, -0.0876390192158623825, 1.10092937864632191]]\n\n mat_srgb_to_rec2020 = [\n [ 0.627403896, 0.329283039, 0.0433130657],\n [ 0.0690972894, 0.919540395, 0.0113623156],\n [ 0.0163914389, 0.0880133077, 0.895595253]]\n\n mat_rec2020_to_srgb = [\n [ 1.660491, -0.587641139,-0.0728498633],\n [-0.124550475, 1.1328999, -0.00834942258],\n [-0.0181507633,-0.100578898, 1.11872966]]\n\n mat_rec2020_to_xyz = [\n [0.636958048301291, 0.144616903586208, 0.168880975164172],\n [0.262700212011267, 0.677998071518871, 0.059301716469862],\n [4.99410657446607e-17, 0.0280726930490874, 1.06098505771079]]\n\n mat_xyz_to_rec2020 = [\n [1.71665118797127, -0.355670783776393, -0.25336628137366],\n [-0.666684351832489, 1.61648123663494, 0.0157685458139111],\n [0.0176398574453108, -0.0427706132578085, 0.942103121235474]]\n\n # NOTE: No chromatic adaptation\n mat_acescg_to_xyz = [\n [ 0.66245418, 0.13400421, 0.15618769],\n [ 0.27222872, 0.67408177, 0.05368952],\n [-0.00557465, 0.00406073, 1.0103391 ]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_acescg = [\n [ 1.64102338, -0.32480329, -0.2364247 ],\n [-0.66366286, 1.61533159, 0.01675635],\n [ 0.01172189, -0.00828444, 0.98839486]]\n\n # NOTE: For CIE XYZ color\n mat_d60_to_d65 = [\n [ 0.98722400,-0.00611327, 0.01595330],\n [-0.00759836, 1.00186000, 0.00533002],\n [ 0.00307257,-0.00509595, 1.08168000]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_d60 = [\n [ 1.01303000, 0.00610531,-0.01497100],\n [ 0.00769823, 0.99816500,-0.00503203],\n [-0.00284131, 0.00468516, 0.92450700]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_dci = [\n [0.976578896646979768, -0.0154362646984919742, -0.016686021704209866],\n [-0.0256896658505145926, 1.02853916787996963, -0.00378517365630504153],\n [-0.00570574587417104179, 0.0110778657389971485, 0.871176159390377409]]\n \n # NOTE: For CIE XYZ color\n mat_dci_to_d65 = [\n [1.02449672775257752, 0.0151635410224165156, 0.0196885223342066827],\n [0.0256121933371584198, 0.97258630562441342, 0.00471635229242730096],\n [0.0063842306500876874, -0.012268082736730219, 1.14794244517367791]]\n\n mat_xyz_to_lms 
= [\n [ 0.8951, 0.2664,-0.1614],\n [-0.7502, 1.7135, 0.0367],\n [ 0.0389,-0.0685, 1.0296]]\n\n mat_lms_to_xyz = [\n [ 0.986993, -0.147054, 0.159963],\n [ 0.432305, 0.51836, 0.0492912],\n [ -0.00852866, 0.0400428, 0.968487]]\n\n # OKLAB's XYZ to LMS\n mat_oklab_m1 = [\n [ 0.8189330101, 0.3618667424, -0.1288597137],\n [ 0.0329845436, 0.9293118715, 0.0361456387],\n [ 0.0482003018, 0.2643662691, 0.6338517070]]\n\n # OKLAB's non-linear L'M'S' to OKLAB\n mat_oklab_m2 = [\n [ 0.2104542553, 0.7936177850, -0.0040720468],\n [ 1.9779984951, -2.4285922050, 0.4505937099],\n [ 0.0259040371, 0.7827717662, -0.8086757660]]\n\n # Inverse of OKLAB M1\n mat_oklab_m1_inv = [\n [ 1.22701385, -0.55779998, 0.28125615],\n [-0.04058018, 1.11225687, -0.07167668],\n [-0.07638128, -0.42148198, 1.58616322]]\n\n # Inverse of OKLAB M2\n mat_oklab_m2_inv = [\n [ 1. , 0.39633779, 0.21580376],\n [ 1.00000001, -0.10556134, -0.06385417],\n [ 1.00000005, -0.08948418, -1.29148554]]\n\n @classmethod\n def convert(cls, im:Union[torch.Tensor, ColorImage], source:Variant, destination:Variant) -> torch.Tensor:\n \"\"\"\n Change the color space of an image. Cylindrical transformations HSV/HSL are \n treated as their own color spaces and assumed to be relative to sRGB linear. \n Unless otherwise noted or required by specification (e.g. ACES), we assume D65 white point.\n\n .. warning::\n\n Tone mapping is not included, so converting the color space of HDR values to \n an LDR-designated color space will not automatically reduce dynamic range. For example, \n taking an HDR image from :code:`ACESCG` (AP1) to :code:`SRGB` will yield the sRGB \n gamma curve, but values outside the required range must still be tone mapped or clamped beforehand.\n\n .. warning::\n\n Cylindrical transformations (HSL, HSV) should be given input in [0, 1] linear sRGB range \n (or equivalent). 
This is not strictly enforced but input outside this range may yield \n unpredictable results or *NaN* values.\n\n :param im: [C=3, H, W] image tensor \n :type im: torch.Tensor | ColorImage\n :param source: color space to convert from\n :param destination: color space to convert to\n :return: image tensor in designated color space\n \"\"\"\n ip, op = source, destination\n cs = cls.Variant\n tf = TransferFunction\n if ip == op: return im\n\n assert im.dim() == 3 and im.size(0) == 3, f\"expected [C=3, H, W] image tensor, got {im.size()}\"\n assert source != 0, f\"Unknown source color space\"\n assert ip & cs.SUPPORTED, f\"Source color space not supported: {source.name}\"\n assert op & cs.SUPPORTED, f\"Destination color space not supported: {destination.name}\"\n assert ip & ~cs.DISABLED, f\"Source color space disabled: {ColorSpace.Variant(ip).name}\"\n assert op & ~cs.DISABLED, f\"Destination color space disabled: {ColorSpace.Variant(op).name}\"\n\n err_not_implemented = f\"Color space conversion not implemented: {ColorSpace.Variant(ip).name} to {ColorSpace.Variant(op).name}\" \n\n # Direct path where it matters, loop-de-loop elsewhere\n if ip == cs.SRGB_LIN:\n if op == cs.SRGB: im = tf.srgb_oetf(im)\n elif op == cs.REC709: im = tf.rec709_oetf(im)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_srgb_to_rec2020))\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_srgb_to_rec2020)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.DCI_P3_LIN: im = mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DISPLAY_P3: im = tf.srgb_oetf(mm(im, cls.mat_srgb_to_displayp3))\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_srgb_to_xyz)\n elif op == cs.CIE_XYY: im = cls._xyz_to_xyy(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.LMS: im = cls._xyz_to_lms(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.ACESCG: im = mm(im, cls.mat_srgb_to_acescg)\n elif op == cs.ACESCC: im = cls._acescg_to_acescc(mm(im, cls.mat_srgb_to_acescg))\n elif op == cs.ACES2065_1: im = mm(im, cls.mat_srgb_to_aces2065_1)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.OKLAB: im = cls._rgb_to_oklab(im)\n elif op == cs.HSL: im = cls._rgb_to_hsl(tf.srgb_oetf(im))\n elif op == cs.HSV: im = cls._rgb_to_hsv(tf.srgb_oetf(im))\n else: raise Exception(err_not_implemented)\n elif ip == cs.SRGB:\n if op == cs.HSL: im = cls._rgb_to_hsl(im)\n elif op == cs.HSV: im = cls._rgb_to_hsv(im)\n else: im = cls.convert(tf.srgb_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC709: im = cls.convert(tf.rec709_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC2020: \n if op == cs.REC2020_LIN: im = tf.rec2020_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.REC2020_LIN: \n if op == cs.REC2020: im = tf.rec2020_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(im, cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(im, cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.DCI_P3: \n if op == cs.DCI_P3_LIN: im = tf.dcip3_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = 
cls.convert(mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DCI_P3_LIN: \n if op == cs.DCI_P3: im = tf.dcip3_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = cls.convert(mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DISPLAY_P3: im = cls.convert(mm(tf.srgb_eotf(im), cls.mat_displayp3_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYZ:\n if op == cs.CIE_XYY: im = cls._xyz_to_xyy(im)\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_xyz_to_rec2020)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_xyz_to_rec2020))\n elif op == cs.DCI_P3_LIN: im = mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.LMS: im = cls._xyz_to_lms(im)\n elif op == cs.ACESCG: im = mm(cls._d65_to_d60(im), cls.mat_xyz_to_acescg)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(im)\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(im)\n elif op == cs.OKLAB: im = cls._xyz_to_oklab(im)\n else: im = cls.convert(mm(im, cls.mat_xyz_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYY: \n if op == cs.CIE_XYZ: im = cls._xyy_to_xyz(im)\n else: im = cls.convert(cls._xyy_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.LMS: \n if op == cs.CIE_XYZ: im = cls._lms_to_xyz(im)\n else: im = cls.convert(cls._lms_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.ACESCG:\n # if op == cs.CIE_XYZ: im = cls._d60_to_d65(mm(im, cls.mat_acescg_to_xyz)) # FIXME: fails unit test (?)\n if op == cs.ACESCC: im = cls._acescg_to_acescc(im)\n else: im = cls.convert(mm(im, cls.mat_acescg_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.ACESCC:\n if op == cs.ACESCG: im = cls._acescc_to_acescg(im)\n else: im = cls.convert(cls._acescc_to_acescg(im), cs.ACESCG, op)\n elif ip == cs.ACES2065_1: im = cls.convert(mm(im, cls.mat_aces2065_1_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.HSL:\n if op == cs.SRGB: im = cls._hsl_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsl_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.HSV:\n if op == cs.SRGB: im = cls._hsv_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsv_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.CIELAB: im = cls.convert(cls._cielab_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.CIELUV: im = cls.convert(cls._cieluv_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.OKLAB:\n if op == cs.CIE_XYZ: im = cls._oklab_to_xyz(im)\n else: im = cls.convert(cls._oklab_to_rgb(im), cs.SRGB_LIN, op)\n else: raise Exception(err_not_implemented)\n\n return im\n\n @classmethod\n def _xyz_to_xyy(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to CIE xyY color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIE xyY color space tensor\n \"\"\"\n X = xyz[0:1]\n Y = xyz[1:2]\n Z = xyz[2:3]\n x = X / (X + Y + Z)\n y = Y / (X + Y + Z)\n return torch.cat([x, y, Y], dim=0)\n\n @classmethod\n def _xyy_to_xyz(cls, xyy:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE xyY color space to CIE XYZ color space.\n\n :param xyy: Input CIE xyY color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n x = xyy[0:1]\n y = xyy[1:2]\n Y = xyy[2:3]\n X = (Y / y) * x\n Z = (Y / y) * (1. 
- x - y)\n return torch.cat([X, Y, Z], dim=0)\n\n @classmethod\n def _xyz_to_lms(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to LMS color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: LMS color space tensor\n \"\"\"\n return mm(xyz, cls.mat_xyz_to_lms)\n\n @classmethod\n def _lms_to_xyz(cls, lms:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert LMS color space to CIE XYZ color space.\n\n :param lms: Input LMS color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n return mm(lms, cls.mat_lms_to_xyz)\n\n @classmethod\n def _acescg_to_acescc(cls, cg:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert scene-linear ACEScg to log ACEScc.\n\n :param lms: Input ACEScg color space tensor\n :return: ACEScc color space tensor\n \"\"\"\n res = torch.where(cg < 0.00003051757, \n (torch.log2(0.00001525878 + cg * 0.5) + 9.72) / 17.52, \n (torch.log2(cg) + 9.72) / 17.52)\n return res\n\n @classmethod\n def _acescc_to_acescg(cls, cc:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert log ACEScc to scene-linear ACEScg.\n\n :param lms: Input ACEScc color space tensor\n :return: ACEScg color space tensor\n \"\"\"\n res = torch.where(cc < -0.3013698630, \n (torch.exp2(cc * 17.52 - 9.72) - 0.00001525878) * 2,\n torch.exp2(cc * 17.52 - 9.72))\n return res\n\n @classmethod\n def _xyz_to_oklab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to OKLAB color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: OKLAB color space tensor\n \"\"\" \n lms = mm(xyz, cls.mat_oklab_m1)\n lms_p = torch.pow(torch.abs(lms), 0.3333333333) * torch.sign(lms).float()\n lab = mm(lms_p, cls.mat_oklab_m2)\n return lab\n\n @classmethod\n def _oklab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert OKLAB color space to CIE XYZ color space.\n\n :param lab: Input OKLAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n lms_p = mm(lab, cls.mat_oklab_m2_inv)\n lms = torch.pow(lms_p, 3.)\n xyz = mm(lms, cls.mat_oklab_m1_inv)\n return xyz\n\n\n @classmethod\n def __pivot_xyz_to_lab(cls, val): \n return torch.where(val > 0.008856, torch.pow(val, 0.3333333333), ((val * 903.3) + 16.0) / 116.0)\n\n @classmethod\n def _xyz_to_cielab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIE XYZ to CIELAB.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIELAB color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE. \n x = xyz[0:1] / 0.95047 \n y = xyz[1:2] / 1.00000 \n z = xyz[2:3] / 1.08883 \n\n x = cls.__pivot_xyz_to_lab(x)\n y = cls.__pivot_xyz_to_lab(y)\n z = cls.__pivot_xyz_to_lab(z)\n\n l = torch.maximum(torch.zeros_like(y).to(y.device), (116.0 * y) - 16.0)\n a = (x - y) * 500.0\n b = (y - z) * 200.0\n return torch.cat([l, a, b], dim=0)\n\n @classmethod\n def _cielab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIELAB to CIE XYZ.\n \n .. note::\n\n Assumes D65 standard illuminant.\n\n :param lab: Input CIELAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n l = lab[0:1]\n a = lab[1:2]\n b = lab[2:3]\n\n # Reminder: The y values is calculated first as it can be reused\n # for the calculation of x and z.\n y = (l + 16.0) / 116.0\n x = y + (a / 500.0)\n z = y - (b / 200.0)\n\n x3 = x * x * x\n z3 = z * z * z\n y3 = y * y * y\n\n x = torch.where(x3 > 0.008856, x3, ((x * 116.0) - 16.0) / 903.3)\n y = torch.where(l > 7.9996248, y3, l / 903.3)\n z = torch.where(z3 > 0.008856, z3, ((z * 116.0) - 16.0) / 903.3)\n\n x = x * 0.95047 \n y = y * 1.00000 \n z = z * 1.08883\n\n return torch.cat([x, y, z], dim=0)\n\n def _xyz_to_cieluv(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIE XYZ to CIELUV. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n small_L = (29. / 3) ** 3 * image[1]\n large_L = 116 * torch.pow(image[1], 1 / 3.) - 16\n L = torch.where(image[1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[0] + 15 * image[1] + 3 * image[2])\n u_prime = torch.where(denom != 0., 4 * image[0] / denom, 0.)\n v_prime = torch.where(denom != 0., 9 * image[1] / denom, 0.)\n d = 0\n elif len(image.size()) == 4:\n small_L = (29. / 3) ** 3 * image[:, 1]\n large_L = 116 * torch.pow(image[:, 1], 1 / 3.) - 16\n L = torch.where(image[:, 1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[:, 0] + 15 * image[:, 1] + 3 * image[:, 2])\n u_prime = torch.where(denom > 0., 4 * image[:, 0] / denom, 0.)\n v_prime = torch.where(denom > 0., 9 * image[:, 1] / denom, 0.)\n d = 1\n\n u = 13 * L * (u_prime - .2009)\n v = 13 * L * (v_prime - .4610)\n\n luv_image = torch.stack((L, u, v), dim=d)\n\n return luv_image\n\n def _cieluv_to_xyz(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIELUV to CIE XYZ. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n denom = (13 * image[0])\n u_prime = torch.where(denom != 0., image[1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[2] / denom, 0.) + .4610\n\n small_Y = image[0] * (3. / 29) ** 3\n large_Y = ((image[0] + 16.) / 116.) ** 3\n\n Y = torch.where(image[0] <= 8, small_Y, large_Y)\n d = 0\n # batch of images\n elif len(image.size()) == 4:\n denom = (13 * image[:, 0])\n u_prime = torch.where(denom != 0., image[:, 1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[:, 2] / denom, 0.) + .4610\n\n small_Y = image[:, 0] * (3. / 29) ** 3\n large_Y = ((image[:, 0] + 16.) / 116.) 
** 3\n\n Y = torch.where(image[:, 0] <= 8, small_Y, large_Y)\n d = 1\n\n X = torch.where(v_prime != 0., Y * 9 * u_prime / (4 * v_prime), 0.)\n Z = torch.where(v_prime != 0., Y * (12 - 3 * u_prime - 20 * v_prime) / (4 * v_prime), 0.)\n\n xyz_image = torch.stack((X, Y, Z), dim=d)\n\n return xyz_image\n\n @classmethod\n def _rgb_to_oklab(cls, rgb:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from linear sRGB to OKLAB.\n\n :param rgb: Input linear sRGB color space tensor\n :return: OKLAB color space tensor\n \"\"\"\n cr = rgb[0:1]\n cg = rgb[1:2]\n cb = rgb[2:3]\n\n l = 0.4122214708 * cr + 0.5363325363 * cg + 0.0514459929 * cb;\n m = 0.2119034982 * cr + 0.6806995451 * cg + 0.1073969566 * cb;\n s = 0.0883024619 * cr + 0.2817188376 * cg + 0.6299787005 * cb;\n\n l_ = torch.pow(torch.abs(l), 0.3333333333) * torch.sign(l).float()\n m_ = torch.pow(torch.abs(m), 0.3333333333) * torch.sign(m).float()\n s_ = torch.pow(torch.abs(s), 0.3333333333) * torch.sign(s).float()\n\n return torch.cat([\n 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_,\n 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_,\n 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_], dim=0)\n\n @classmethod\n def _oklab_to_rgb(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from OKLAB to linear sRGB.\n\n :param lab: Input OKLAB color space tensor\n :return: Linear sRGB color space tensor\n \"\"\"\n cl = lab[0:1]\n ca = lab[1:2]\n cb = lab[2:3]\n\n l_ = cl + 0.3963377774 * ca + 0.2158037573 * cb\n m_ = cl - 0.1055613458 * ca - 0.0638541728 * cb\n s_ = cl - 0.0894841775 * ca - 1.2914855480 * cb\n\n l = l_*l_*l_\n m = m_*m_*m_\n s = s_*s_*s_\n\n return torch.cat([\n +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s,\n -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s,\n -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s], dim=0)\n\n @classmethod\n def _rgb_to_hsl(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSL. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n :param rgb: Input sRGB image tensor\n :return: HSL image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsl_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsl_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsl_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsl_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsl_h[cmax_idx == 3] = 0.\n hsl_h /= 6.\n\n hsl_l = (cmax + cmin) / 2.\n hsl_s = torch.empty_like(hsl_h)\n hsl_s[hsl_l == 0] = 0\n hsl_s[hsl_l == 1] = 0\n hsl_l_ma = torch.bitwise_and(hsl_l > 0, hsl_l < 1)\n hsl_l_s0_5 = torch.bitwise_and(hsl_l_ma, hsl_l <= 0.5)\n hsl_l_l0_5 = torch.bitwise_and(hsl_l_ma, hsl_l > 0.5)\n hsl_s[hsl_l_s0_5] = ((cmax - cmin) / (hsl_l * 2.))[hsl_l_s0_5]\n hsl_s[hsl_l_l0_5] = ((cmax - cmin) / (- hsl_l * 2. + 2.))[hsl_l_l0_5]\n return torch.cat([hsl_h, hsl_s, hsl_l], dim=1).squeeze(0)\n\n @classmethod\n def _hsl_to_rgb(cls, hsl: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSL image tensor to sRGB. \n \n .. note::\n\n returns non-linear sRGB w/ gamma curve as output\n\n :param hsl: Input HSL image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsl = hsl.unsqueeze(0)\n hsl_h, hsl_s, hsl_l = hsl[:, 0:1], hsl[:, 1:2], hsl[:, 2:3]\n _c = (-torch.abs(hsl_l * 2. - 1.) + 1) * hsl_s\n _x = _c * (-torch.abs(hsl_h * 6. % 2. 
- 1) + 1.)\n _m = hsl_l - _c / 2.\n idx = (hsl_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsl).to(hsl.device)\n _o = torch.zeros_like(_c).to(hsl.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _rgb_to_hsv(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSV. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n .. warning::\n\n input tensor will be clamped to [0, 1] range\n\n :param rgb: Input sRGB image tensor\n :return: HSV image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.clamp(0.,1.).unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsv_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsv_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsv_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsv_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsv_h[cmax_idx == 3] = 0.\n hsv_h /= 6.\n hsv_s = torch.where(cmax == 0, torch.tensor(0.).type_as(rgb), delta / cmax)\n hsv_v = cmax\n return torch.cat([hsv_h, hsv_s, hsv_v], dim=1).squeeze(0)\n\n @classmethod\n def _hsv_to_rgb(cls, hsv: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSV image tensor to sRGB. \n \n .. 
note::\n \n returns non-linear sRGB w/ gamma curve as output\n\n :param hsv: Input HSV image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsv = hsv.unsqueeze(0)\n hsv_h, hsv_s, hsv_l = hsv[:, 0:1], hsv[:, 1:2], hsv[:, 2:3]\n _c = hsv_l * hsv_s\n _x = _c * (- torch.abs(hsv_h * 6. % 2. - 1) + 1.)\n _m = hsv_l - _c\n _o = torch.zeros_like(_c).to(hsv.device)\n idx = (hsv_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsv).to(hsv.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _d60_to_d65(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from \"D60\" to D65 white point.\n\n :param im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n # There is not really a CIE D60 white point, but that's what everyone calls what ACES uses.\n return mm(im, cls.mat_d60_to_d65)\n\n @classmethod\n def _d65_to_d60(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from D65 to \"D60\" white point.\n\n :param torch.Tensor im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n return mm(im, cls.mat_d65_to_d60)" }, { "identifier": "load_lut", "path": "src/tinycio/fsio/lutfile.py", "snippet": "def load_lut(fp:str, lut_format:LUTFormat=LUTFormat.UNKNOWN) -> torch.Tensor:\n \"\"\"\n Load LUT from file.\n\n :param fp: File path to load from.\n :param lut_format: Format of the LUT.\n :return: lattice as PyTorch tensor\n \"\"\"\n fp = os.path.realpath(fp)\n fn, fnext = os.path.splitext(fp)\n if lut_format == LUTFormat.UNKNOWN: lut_format = _infer_lut_file_format(fnext)\n assert lut_format > LUTFormat.UNKNOWN, \"Unrecognized LUT format\"\n lattice = None\n if lut_format == LUTFormat.CUBE_3D:\n with open(fp, 'r') as file:\n # Read lines and filter out comments\n lines = [line.strip() for line in file.readlines() if len(line) > 0 and \"#\" not in line]\n\n # Find the line indicating the start of the LUT data\n lut_start_index = next((i for i, line in 
enumerate(lines) if line.startswith('LUT_3D_SIZE')), None)\n\n if lut_start_index is None: raise ValueError(\"LUT_3D_SIZE indicator not found in the .cube file.\")\n\n # Extract LUT data\n lut_data = [list(map(float, line.split())) for line in lines[lut_start_index + 1:] \\\n if len(line) > 0 and len(line.split()) == 3 and \"#\" not in line] \n\n # Convert the LUT data to a PyTorch tensor\n lut_size = int(lines[lut_start_index].split()[1])\n\n lattice = torch.tensor(lut_data).view(lut_size, lut_size, lut_size, 3).permute(2,1,0,3)\n else:\n raise Exception(\"Unsupported LUT format\")\n return lattice" }, { "identifier": "save_lut", "path": "src/tinycio/fsio/lutfile.py", "snippet": "def save_lut(lattice:torch.Tensor, fp:str, lut_format:LUTFormat=LUTFormat.UNKNOWN) -> bool:\n \"\"\"\n Save LUT to a file.\n\n .. warning:: \n \n This will overwrite existing files.\n\n :param lattice: PyTorch tensor representing the LUT.\n :param fp: File path for saving the LUT.\n :param lut_format: Format of the LUT.\n :return: True if successful\n \"\"\"\n fp = os.path.realpath(fp)\n fn, fnext = os.path.splitext(fp)\n\n if lut_format == LUTFormat.UNKNOWN: lut_format = _infer_lut_file_format(fnext)\n\n if lut_format == LUTFormat.CUBE_3D:\n # Convert the torch tensor to a list of lists\n lut_data_list = lattice.permute(2,1,0,3).reshape(-1, 3).tolist()\n\n # Write the LUT data to the file\n with open(fp, 'w') as file:\n file.write(f\"LUT_3D_SIZE {lattice.size(0)}\\n\")\n for entry in lut_data_list:\n file.write(\" \".join(map(str, entry)) + \"\\n\")\n\n else:\n raise Exception(\"Unsupported LUT format\")\n\n return True" }, { "identifier": "_infer_lut_file_format", "path": "src/tinycio/fsio/lutfile.py", "snippet": "def _infer_lut_file_format(ext:str) -> LUTFormat:\n ext = ext.strip().lower()\n if ext == '.cube': return LUTFormat.CUBE_3D\n else: return LUTFormat.UNKNOWN" }, { "identifier": "_generate_linear_cube_lut", "path": "src/tinycio/fsio/lutfile.py", "snippet": "def _generate_linear_cube_lut(size: int):\n \"\"\"\n Generate a baseline linear cube LUT.\n\n :param size: Size of the cube LUT (e.g., 33 for a 33x33x33 cube).\n :return: Torch tensor representing the linear cube LUT.\n \"\"\"\n linear_values = torch.linspace(0.0, 1.0, size)\n grid = torch.meshgrid(linear_values, linear_values, linear_values, indexing=\"xy\")\n lut_data = torch.stack(grid, dim=-1).permute(1,0,2,3) # TODO: WTF is even going on here?\n return lut_data" }, { "identifier": "LUTFormat", "path": "src/tinycio/fsio/format.py", "snippet": "class LUTFormat(IntEnum):\n \"\"\"\n Lookup table format. Available options are:\n\n .. highlight:: text\n .. code-block:: text\n \n - UNKNOWN\n - CUBE_3D\n \"\"\"\n UNKNOWN = 1<<0 # no color space specified - flag for guessing\n CUBE_3D = 1<<1 # 3D CUBE LUT https://resolve.cafe/developers/luts/\n\n LUT_3D = CUBE_3D # | etc for later" }, { "identifier": "srgb_luminance", "path": "src/tinycio/util/colorutil.py", "snippet": "def srgb_luminance(im_srgb:Union[torch.Tensor, ColorImage]) -> torch.Tensor:\n \"\"\"\n Return relative luminance of linear sRGB image.\n\n :param im_srgb: [C=3, H, W] color image tensor in sRGB color space\n :type im_srgb: torch.Tensor | ColorImage\n :return: [C=1, H, W] image tensor\n \"\"\"\n lum_r, lum_g, lum_b = 0.2126, 0.7152, 0.0722\n return lum_r * im_srgb[0:1,...] + lum_g * im_srgb[1:2,...] 
+ lum_b * im_srgb[2:3,...]" }, { "identifier": "trilinear_interpolation", "path": "src/tinycio/util/miscutil.py", "snippet": "def trilinear_interpolation(im_3d:torch.Tensor, indices:Union[ColorImage, torch.Tensor]) -> torch.Tensor:\n \"\"\"\n Interpolate 3D image tensor.\n\n :param im_3d: Input 3D image tensor of shape (C, D, H, W).\n :param indices: Indices into the tensor.\n :return: Interpolated color values.\n \"\"\"\n # NOTE: Internal - leaving this clutter undocumented intentionally\n indices_floor = indices.floor().to(torch.long)\n indices_ceil = indices.ceil().clamp(0, im_3d.size(0) - 1).to(torch.long)\n\n weights = (indices - indices_floor).float()\n\n c000 = im_3d[indices_floor[0], indices_floor[1], indices_floor[2]]\n c001 = im_3d[indices_floor[0], indices_floor[1], indices_ceil[2]]\n c010 = im_3d[indices_floor[0], indices_ceil[1], indices_floor[2]]\n c011 = im_3d[indices_floor[0], indices_ceil[1], indices_ceil[2]]\n c100 = im_3d[indices_ceil[0], indices_floor[1], indices_floor[2]]\n c101 = im_3d[indices_ceil[0], indices_floor[1], indices_ceil[2]]\n c110 = im_3d[indices_ceil[0], indices_ceil[1], indices_floor[2]]\n c111 = im_3d[indices_ceil[0], indices_ceil[1], indices_ceil[2]]\n\n interpolated_values = torch.zeros_like(c000).requires_grad_()\n interpolated_values = (\n (1 - weights[0]) * (1 - weights[1]) * (1 - weights[2]) * c000.permute(2,0,1) +\n (1 - weights[0]) * (1 - weights[1]) * weights[2] * c001.permute(2,0,1) +\n (1 - weights[0]) * weights[1] * (1 - weights[2]) * c010.permute(2,0,1) +\n (1 - weights[0]) * weights[1] * weights[2] * c011.permute(2,0,1) +\n weights[0] * (1 - weights[1]) * (1 - weights[2]) * c100.permute(2,0,1) +\n weights[0] * (1 - weights[1]) * weights[2] * c101.permute(2,0,1) +\n weights[0] * weights[1] * (1 - weights[2]) * c110.permute(2,0,1) +\n weights[0] * weights[1] * weights[2] * c111.permute(2,0,1)\n )\n\n return interpolated_values" }, { "identifier": "feature_moments_calculation", "path": "src/tinycio/loss.py", "snippet": "def feature_moments_calculation(feat, eps=1e-5):\n # https://github.com/semchan/NLUT/blob/main/LICENSE\n # MIT License\n # <!-- Copyright (c) 2010, 2011 the Friendika Project -->\n # All rights reserved.\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in\n # all copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n # THE SOFTWARE\n size = feat.size()\n assert (len(size) == 3)\n N, C = size[:2]\n feat_var = feat.view(N, C, -1).var(dim=2) + eps\n # feat_std = feat_var.sqrt().view(N, C, 1, 1)\n # the first order\n feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1)\n\n # the second order\n feat_size = 2\n # N, C = size[:2]\n feat_p2 = torch.abs(feat-feat_mean).pow(feat_size).view(N, C, -1)\n N, C,L = feat_p2.shape\n feat_p2 = feat_p2.sum(dim=2)/L\n feat_p2 = feat_p2.pow(1/feat_size).view(N, C, 1)\n # the third order\n feat_size = 3\n # N, C = size[:2]\n feat_p3 = torch.abs(feat-feat_mean).pow(feat_size).view(N, C, -1)\n # N, C,L = feat_p3.shape\n feat_p3 = feat_p3.sum(dim=2)/L\n feat_p3 = feat_p3.pow(1/feat_size).view(N, C, 1)\n\n return feat_mean.view(N, C) , feat_p2.view(N, C), feat_p3.view(N, C)" } ]
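The `save_lut`/`load_lut` pair in the context above round-trips through the `.cube` text format: `save_lut` flattens the lattice with `permute(2,1,0,3)` before writing, and the loader applies the same permute after reshaping, so the two cancel exactly. A minimal sketch, assuming the public import path `tinycio.fsio.lutfile` (inferred from the repo layout) and that `load_lut(fp)` mirrors `save_lut`, inferring the format from the extension via `_infer_lut_file_format`:

```python
import torch
# Import paths are an assumption based on the repo layout (src/tinycio/...)
from tinycio.fsio.lutfile import load_lut, save_lut, _generate_linear_cube_lut
from tinycio.fsio.format import LUTFormat

lattice = _generate_linear_cube_lut(33)                # (33, 33, 33, 3) identity lattice
save_lut(lattice, "identity.cube", LUTFormat.CUBE_3D)  # writes "LUT_3D_SIZE 33" + RGB rows
roundtrip = load_lut("identity.cube")                  # format inferred from ".cube"
assert torch.allclose(lattice, roundtrip)              # permute(2,1,0,3) is self-inverse
```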
import typing
import os

import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F

from typing import Union
from enum import IntEnum
from contextlib import nullcontext

from .colorspace import ColorSpace
from .fsio.lutfile import load_lut, save_lut, _infer_lut_file_format, _generate_linear_cube_lut
from .fsio.format import LUTFormat
from .util.colorutil import srgb_luminance
from .util.miscutil import trilinear_interpolation
from .loss import feature_moments_calculation
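The final import pulls in `feature_moments_calculation` from the loss module shown in the context: per channel it returns the mean plus root-power-normalized second and third central moments. A hedged equivalent, useful as a sanity check against the snippet:

```python
import torch

feat = torch.randn(2, 4, 128)  # (N, C, L), matching the assert in the snippet
m1 = feat.mean(dim=2)                                              # E[x]
m2 = (feat - m1.unsqueeze(2)).abs().pow(2).mean(dim=2).pow(1 / 2)  # E[|x - m1|^2]^(1/2)
m3 = (feat - m1.unsqueeze(2)).abs().pow(3).mean(dim=2).pow(1 / 3)  # E[|x - m1|^3]^(1/3)
# These match feature_moments_calculation(feat); the eps there only pads the
# variance term, which the returned values do not use.
```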
17,698
from __future__ import annotations

class LookupTable:
    """
    Color lookup table. Example:

    .. highlight:: python
    .. code-block:: python

        lut = LookupTable.get_negative()
        im_negative = lut.apply(im)

    :param size: Size of the LUT.
    :param lattice: Lattice as tensor (defaults to linear).
    :param lut_format: Format of the LUT.
    """
    size = 32
    lattice = None
from __future__ import annotations

class LookupTable:
    """
    Color lookup table. Example:

    .. highlight:: python
    .. code-block:: python

        lut = LookupTable.get_negative()
        im_negative = lut.apply(im)

    :param size: Size of the LUT.
    :param lattice: Lattice as tensor (defaults to linear).
    :param lut_format: Format of the LUT.
    """
    size = 32
    lattice = None
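The record cuts the class off right after its attribute defaults; the gold `next_line` below continues that block. For orientation only, a hypothetical `__init__` consistent with the documented parameters — the repository's actual body is not shown in this record:

```python
# Hypothetical sketch, not the repository's code: it only restates the
# documented defaults (":param lattice: ... (defaults to linear)").
def __init__(self, size=32, lattice=None, lut_format=LUTFormat.UNKNOWN):
    self.size = size
    self.lut_format = lut_format
    self.lattice = lattice if lattice is not None else _generate_linear_cube_lut(size)
```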
lut_format= LUTFormat.UNKNOWN
5
2023-12-15 15:39:08+00:00
24k
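This record's context combines `load_lut` with `trilinear_interpolation`: a 3D LUT is applied by scaling [0, 1] RGB values into continuous lattice coordinates and blending the eight surrounding lattice corners. A hedged sketch — the import paths and the red/green/blue-to-axis mapping (which follows the `permute(2,1,0,3)` in `load_lut`) are assumptions:

```python
import torch
from tinycio.util.miscutil import trilinear_interpolation

def apply_lut(im: torch.Tensor, lattice: torch.Tensor) -> torch.Tensor:
    """im: (3, H, W) in [0, 1]; lattice: (S, S, S, 3) as returned by load_lut."""
    size = lattice.size(0)
    indices = im.clamp(0.0, 1.0) * (size - 1)         # continuous lattice coordinates
    return trilinear_interpolation(lattice, indices)  # -> (3, H, W)

out = apply_lut(torch.rand(3, 64, 64), torch.rand(32, 32, 32, 3))
assert out.shape == (3, 64, 64)
```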
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetProcessor", "path": "magicanimate/models/multicontrolnet.py", "snippet": "class ControlNetProcessor(object):\n def __init__(\n self,\n controlnet: ControlNetModel,\n # image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],\n # controlnet_cond = torch.FloatTensor, #fix\n # conditioning_scale: float = 1.0,\n ):\n self.controlnet = controlnet\n # self.image = image\n # self.controlnet_cond = controlnet_cond #fix\n # self.conditioning_scale = conditioning_scale\n\n # def _default_height_width(self, height, width, image):\n # if isinstance(image, list):\n # image = image[0]\n\n # if height is None:\n # if isinstance(image, PIL.Image.Image):\n # height = image.height\n # elif isinstance(image, torch.Tensor):\n # height = image.shape[3]\n\n # height = (height // 8) * 8 # round down to nearest multiple of 8\n\n # if width is None:\n # if isinstance(image, PIL.Image.Image):\n # width = image.width\n # elif isinstance(image, torch.Tensor):\n # width = image.shape[2]\n\n # width = (width // 8) * 8 # round down to nearest multiple of 8\n\n # return height, width\n\n # def default_height_width(self, height, width):\n # return self._default_height_width(height, width, self.image)\n\n # def _prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype):\n # if not isinstance(image, torch.Tensor):\n # if isinstance(image, PIL.Image.Image):\n # image = [image]\n\n # if isinstance(image[0], PIL.Image.Image):\n # image = [\n # np.array(i.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"]))[None, :] for i in image\n # ]\n # image = np.concatenate(image, axis=0)\n # image = np.array(image).astype(np.float32) / 255.0\n # image = image.transpose(0, 3, 1, 2)\n # image = torch.from_numpy(image)\n # elif isinstance(image[0], torch.Tensor):\n # image = torch.cat(image, dim=0)\n\n # image_batch_size = image.shape[0]\n\n # if image_batch_size == 1:\n # repeat_by = batch_size\n # else:\n # # image batch size is the same as prompt batch size\n # repeat_by = num_images_per_prompt\n\n # image = image.repeat_interleave(repeat_by, dim=0)\n\n # image = image.to(device=device, dtype=dtype)\n\n # return image\n\n # def _check_inputs(self, image, prompt, prompt_embeds):\n # image_is_pil = isinstance(image, PIL.Image.Image)\n # image_is_tensor = isinstance(image, torch.Tensor)\n # image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n # image_is_tensor_list = 
isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n # if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n # raise TypeError(\n # \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n # )\n\n # if image_is_pil:\n # image_batch_size = 1\n # elif image_is_tensor:\n # image_batch_size = image.shape[0]\n # elif image_is_pil_list:\n # image_batch_size = len(image)\n # elif image_is_tensor_list:\n # image_batch_size = len(image)\n\n # if prompt is not None and isinstance(prompt, str):\n # prompt_batch_size = 1\n # elif prompt is not None and isinstance(prompt, list):\n # prompt_batch_size = len(prompt)\n # elif prompt_embeds is not None:\n # prompt_batch_size = prompt_embeds.shape[0]\n\n # if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n # raise ValueError(\n # f\"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n # )\n\n # def check_inputs(self, prompt, prompt_embeds):\n # self._check_inputs(self.image, prompt, prompt_embeds)\n\n # def prepare_image(self, width, height, batch_size, num_images_per_prompt, device, do_classifier_free_guidance):\n # self.image = self._prepare_image(\n # self.image, width, height, batch_size, num_images_per_prompt, device, self.controlnet.dtype\n # )\n # if do_classifier_free_guidance:\n # self.image = torch.cat([self.image] * 2)\n\n def __call__(\n self,\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond, #fix\n conditioning_scale,\n return_dict,\n ) -> Tuple:\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond,\n conditioning_scale, \n return_dict=False,\n )\n down_block_res_samples = [\n down_block_res_sample * conditioning_scale for down_block_res_sample in down_block_res_samples\n ]\n mid_block_res_sample *= conditioning_scale\n return (down_block_res_samples, mid_block_res_sample)" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.multicontrolnet import ControlNetProcessor  #fix
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
15,775
        verbose=False
    ):
        """
        Inverse sampling for DDIM Inversion
        """
        if verbose:
            print("timestep: ", timestep)
        next_step = timestep
        timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
        beta_prod_t = 1 - alpha_prod_t
        pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
        x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
        return x_next, pred_x0

    @torch.no_grad()
    def images2latents(self, images, dtype):
        """
        Convert RGB image to VAE latents
        """
        device = self._execution_device
        images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1
        images = rearrange(images, "f h w c -> f c h w").to(device)
        latents = []
        for frame_idx in range(images.shape[0]):
            latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215)
        latents = torch.cat(latents)
        return latents

    @torch.no_grad()
    def invert(
        self,
        image: torch.Tensor,
        prompt,
        num_inference_steps=20,
        num_actual_inference_steps=10,
        eta=0.0,
        return_intermediates=False,
        **kwargs):
        """
        Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440
        invert a real image into noise map with deterministic DDIM inversion
        """
        device = self._execution_device
        batch_size = image.shape[0]
        if isinstance(prompt, list):
            if batch_size == 1:
                image = image.expand(len(prompt), -1, -1, -1)
        elif isinstance(prompt, str):
            if batch_size > 1:
                prompt = [prompt] * batch_size

        # text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=77,
            return_tensors="pt"
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
        print("input text embeddings :", text_embeddings.shape)

        # define initial latents
        latents = self.images2latents(image)
        print("latents shape: ", latents.shape)

        # iterative sampling
        self.scheduler.set_timesteps(num_inference_steps)
        print("Valid timesteps: ", reversed(self.scheduler.timesteps))
        latents_list = [latents]
        pred_x0_list = [latents]
        for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
            if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
                continue
            model_inputs = latents

            # predict the noise
            # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w)
            model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w")
            noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample
            noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w")

            # compute the next noise sample x_t -> x_{t+1}
            latents, pred_x0 = self.next_step(noise_pred, t, latents)
            latents_list.append(latents)
            pred_x0_list.append(pred_x0)

        if return_intermediates:
            # return the intermediate latents recorded during inversion
            return latents, latents_list
        return latents

    def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device):
        if interpolation_factor < 2:
            return latents

        new_latents = torch.zeros(
            (latents.shape[0], latents.shape[1], ((latents.shape[2] - 1) * interpolation_factor) + 1, latents.shape[3], latents.shape[4]),
            device=latents.device,
            dtype=latents.dtype,
        )

        org_video_length = latents.shape[2]
        rate = [i / interpolation_factor for i in range(interpolation_factor)][1:]

        new_index = 0

        v0 = None
        v1 = None

        for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]):
            v0 = latents[:, :, i0, :, :]
            v1 = latents[:, :, i1, :, :]

            new_latents[:, :, new_index, :, :] = v0
            new_index += 1

            for f in rate:
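The `next_step` method in the cropped code runs the deterministic (η = 0) DDIM update in reverse, mapping x_t to x_{t+1}; the `min(..., 999)` clamp and `final_alpha_cumprod` guard the boundary timesteps. Written out:

```latex
\hat{x}_0 = \frac{x_t - \sqrt{1 - \bar{\alpha}_t}\,\epsilon_\theta(x_t, t)}{\sqrt{\bar{\alpha}_t}},
\qquad
x_{t+1} = \sqrt{\bar{\alpha}_{t+1}}\,\hat{x}_0 + \sqrt{1 - \bar{\alpha}_{t+1}}\,\epsilon_\theta(x_t, t)
```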
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            # controlnet1=processors[0],
            scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)

    def enable_vae_slicing(self):
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        self.vae.disable_slicing()

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        text_embeddings = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        text_embeddings = text_embeddings[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            uncond_embeddings = uncond_embeddings[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings

    def decode_latents(self, latents, rank, decoder_consistency=None):
        video_length = latents.shape[2]
        latents = 1 / 0.18215 * latents
        latents = rearrange(latents, "b c f h w -> (b f) c h w")
        # video = self.vae.decode(latents).sample
        video = []
        for frame_idx in tqdm(range(latents.shape[0]), disable=(rank != 0)):
            if decoder_consistency is not None:
                video.append(decoder_consistency(latents[frame_idx:frame_idx+1]))
            else:
                video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample)
        video = torch.cat(video)
        video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
        video = (video / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        video = video.cpu().float().numpy()
        return video

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16):
        shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        if latents is None:
            rand_device = "cpu" if device.type == "mps" else device

            if isinstance(generator, list):
                latents = [
                    torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
                    for i in range(batch_size)
                ]
                latents = torch.cat(latents, dim=0).to(device)
            else:
                latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)

            latents = latents.repeat(1, 1, video_length // clip_length, 1, 1)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance):
        # Prepare first condition
        condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0
        condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0)
        condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone()

        # Prepare second condition
        condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0
        condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0)
        condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone()

        # Blend the two conditions with a fixed 80/20 weighting
        combined_condition = (condition1 * 8 + condition2 * 2) / 10

        if do_classifier_free_guidance:
            combined_condition = torch.cat([combined_condition] * 2)

        # combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype)
        return combined_condition

    def next_step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        x: torch.FloatTensor,
        eta=0.,
        verbose=False
    ):
        """
        Inverse sampling for DDIM Inversion
        """
        if verbose:
            print("timestep: ", timestep)
        next_step = timestep
        timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999)
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step]
        beta_prod_t = 1 - alpha_prod_t
        pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output
        x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir
        return x_next, pred_x0

    @torch.no_grad()
    def images2latents(self, images, dtype):
        """
        Convert RGB image to VAE latents
        """
        device = self._execution_device
        images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1
        images = rearrange(images, "f h w c -> f c h w").to(device)
        latents = []
        for frame_idx in range(images.shape[0]):
            latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215)
        latents = torch.cat(latents)
        return latents

    @torch.no_grad()
    def invert(
        self,
        image: torch.Tensor,
        prompt,
        num_inference_steps=20,
        num_actual_inference_steps=10,
        eta=0.0,
        return_intermediates=False,
        **kwargs):
        """
        Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440
        invert a real image into noise map with deterministic DDIM inversion
        """
        device = self._execution_device
        batch_size = image.shape[0]
        if isinstance(prompt, list):
            if batch_size == 1:
                image = image.expand(len(prompt), -1, -1, -1)
        elif isinstance(prompt, str):
            if batch_size > 1:
                prompt = [prompt] * batch_size

        # text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=77,
            return_tensors="pt"
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0]
        print("input text embeddings :", text_embeddings.shape)

        # define initial latents
        # `dtype` is a required argument of images2latents; match the text embeddings' dtype
        latents = self.images2latents(image, text_embeddings.dtype)
        print("latents shape: ", latents.shape)

        # iterative sampling
        self.scheduler.set_timesteps(num_inference_steps)
        print("Valid timesteps: ", reversed(self.scheduler.timesteps))
        latents_list = [latents]
        pred_x0_list = [latents]
        for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")):
            if num_actual_inference_steps is not None and i >= num_actual_inference_steps:
                continue
            model_inputs = latents

            # predict the noise
            # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w)
            model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w")
            noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample
            noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w")

            # compute the previous noise sample x_t-1 -> x_t
            latents, pred_x0 = self.next_step(noise_pred, t, latents)
            latents_list.append(latents)
            pred_x0_list.append(pred_x0)

        if return_intermediates:
            # return the intermediate latents during inversion
            return latents, latents_list
        return latents

    def interpolate_latents(self, latents: torch.Tensor, interpolation_factor: int, device):
        if interpolation_factor < 2:
            return latents

        new_latents = torch.zeros(
            (latents.shape[0], latents.shape[1], ((latents.shape[2] - 1) * interpolation_factor) + 1, latents.shape[3], latents.shape[4]),
            device=latents.device,
            dtype=latents.dtype,
        )

        org_video_length = latents.shape[2]
        rate = [i / interpolation_factor for i in range(interpolation_factor)][1:]

        new_index = 0

        v0 = None
        v1 = None

        for i0, i1 in zip(range(org_video_length), range(org_video_length)[1:]):
            v0 = latents[:, :, i0, :, :]
            v1 = latents[:, :, i1, :, :]

            new_latents[:, :, new_index, :, :] = v0
            new_index += 1

            for f in rate:
v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f)
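A note on the completion target above: `get_tensor_interpolation_method()` is defined elsewhere in the repository and is not shown in this excerpt. A minimal sketch of the kind of callable it might return, assuming a plain linear blend between two latent frames; the function name and body here are illustrative, not the repository's actual implementation:

import torch

def linear_interpolation(v0: torch.Tensor, v1: torch.Tensor, f: float) -> torch.Tensor:
    # blend two latent frames; f in (0, 1) moves the result from v0 toward v1
    return (1.0 - f) * v0 + f * v1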
5
2023-12-15 01:22:37+00:00
24k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "assert_header_parsing", "path": ".venv/Lib/site-packages/urllib3/util/response.py", "snippet": "def assert_header_parsing(headers: httplib.HTTPMessage) -> None:\n \"\"\"\n Asserts whether all headers have been 
successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param http.client.HTTPMessage headers: Headers to verify.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(f\"expected httplib.Message, got {type(headers)}.\")\n\n unparsed_data = None\n\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = headers.get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n # httplib is assuming a response body is available\n # when parsing headers even when httplib only sends\n # header data to parse_headers() This results in\n # defects on multipart responses in particular.\n # See: https://github.com/urllib3/urllib3/issues/800\n\n # So we ignore the following defects:\n # - StartBoundaryNotFoundDefect:\n # The claimed start boundary was never found.\n # - MultipartInvariantViolationDefect:\n # A message claimed to be a multipart but no subparts were found.\n defects = [\n defect\n for defect in headers.defects\n if not isinstance(\n defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n )\n ]\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)" }, { "identifier": "_DEFAULT_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token" }, { "identifier": "_TYPE_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]]" }, { "identifier": "Timeout", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. 
Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. 
It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "to_str", "path": ".venv/Lib/site-packages/urllib3/util/util.py", "snippet": "def to_str(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> str:\n if isinstance(x, str):\n return x\n elif not isinstance(x, bytes):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.decode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.decode()" }, { "identifier": "wait_for_read", "path": ".venv/Lib/site-packages/urllib3/util/wait.py", "snippet": "def wait_for_read(sock: socket.socket, timeout: float | None = None) -> bool:\n \"\"\"Waits for reading to be available on a given socket.\n Returns True if the socket is readable, or False if the timeout expired.\n \"\"\"\n return wait_for_socket(sock, read=True, timeout=timeout)" }, { "identifier": "_TYPE_BODY", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "ProxyConfig", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class ProxyConfig(typing.NamedTuple):\n ssl_context: ssl.SSLContext | None\n use_forwarding_for_https: bool\n assert_hostname: None | str | Literal[False]\n assert_fingerprint: str | None" }, { "identifier": "_ResponseOptions", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class _ResponseOptions(typing.NamedTuple):\n # TODO: Remove this in favor of a better\n # HTTP request/response lifecycle tracking.\n request_method: str\n request_url: str\n preload_content: bool\n decode_content: bool\n enforce_content_length: bool" }, { "identifier": "__version__", "path": ".venv/Lib/site-packages/urllib3/_version.py", "snippet": "" }, { "identifier": "ConnectTimeoutError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ConnectTimeoutError(TimeoutError):\n \"\"\"Raised when a socket timeout occurs while connecting to a server\"\"\"" }, { "identifier": "HeaderParsingError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class HeaderParsingError(HTTPError):\n \"\"\"Raised by assert_header_parsing, but we convert it to a log.warning statement.\"\"\"\n\n def __init__(\n self, defects: list[MessageDefect], unparsed_data: bytes | str | None\n ) -> None:\n message = f\"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}\"\n super().__init__(message)" }, { "identifier": "NameResolutionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NameResolutionError(NewConnectionError):\n \"\"\"Raised when host name resolution fails.\"\"\"\n\n def __init__(self, host: str, 
conn: HTTPConnection, reason: socket.gaierror):\n message = f\"Failed to resolve '{host}' ({reason})\"\n super().__init__(conn, message)" }, { "identifier": "NewConnectionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "ProxyError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n # The original error is also available as __cause__.\n original_error: Exception\n\n def __init__(self, message: str, error: Exception) -> None:\n super().__init__(message, error)\n self.original_error = error" }, { "identifier": "SystemTimeWarning", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class SystemTimeWarning(SecurityWarning):\n \"\"\"Warned when system time is suspected to be wrong\"\"\"" }, { "identifier": "connection", "path": ".venv/Lib/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]\nHAS_IPV6 = _has_ipv6(\"::1\")\ndef is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific\ndef create_connection(\n address: tuple[str, int],\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n socket_options: _TYPE_SOCKET_OPTIONS | None = None,\n) -> socket.socket:\ndef _set_socket_options(\n sock: socket.socket, options: _TYPE_SOCKET_OPTIONS | None\n) -> None:\ndef allowed_gai_family() -> socket.AddressFamily:\ndef _has_ipv6(host: str) -> bool:" }, { "identifier": "ssl_", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "HAS_NEVER_CHECK_COMMON_NAME = False\nIS_PYOPENSSL = False\nALPN_PROTOCOLS = [\"http/1.1\"]\n_TYPE_VERSION_INFO = typing.Tuple[int, int, int, str, int]\nHASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}\n_SSL_VERSION_TO_TLS_VERSION: dict[int, int] = {}\n HAS_NEVER_CHECK_COMMON_NAME = False\n OP_NO_COMPRESSION = 0x20000 # type: ignore[assignment]\n OP_NO_TICKET = 0x4000 # type: ignore[assignment]\n PROTOCOL_TLS_CLIENT = 16 # type: ignore[assignment]\n_TYPE_PEER_CERT_RET = typing.Union[\"_TYPE_PEER_CERT_RET_DICT\", bytes, None]\ndef _is_bpo_43522_fixed(\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef _is_has_never_check_common_name_reliable(\n openssl_version: str,\n openssl_version_number: int,\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\ndef resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\ndef resolve_ssl_version(candidate: None | int | str) -> int:\ndef create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> 
ssl.SSLContext:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: bool = ...,\n) -> ssl.SSLSocket | SSLTransportType:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = None,\n certfile: str | None = None,\n cert_reqs: int | None = None,\n ca_certs: str | None = None,\n server_hostname: str | None = None,\n ssl_version: int | None = None,\n ciphers: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_cert_dir: str | None = None,\n key_password: str | None = None,\n ca_cert_data: None | str | bytes = None,\n tls_in_tls: bool = False,\n) -> ssl.SSLSocket | SSLTransportType:\ndef is_ipaddress(hostname: str | bytes) -> bool:\ndef _is_key_file_encrypted(key_file: str) -> bool:\ndef _ssl_wrap_socket_impl(\n sock: socket.socket,\n ssl_context: ssl.SSLContext,\n tls_in_tls: bool,\n server_hostname: str | None = None,\n) -> ssl.SSLSocket | SSLTransportType:\n class _TYPE_PEER_CERT_RET_DICT(TypedDict, total=False):" }, { "identifier": "SKIP_HEADER", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIP_HEADER = \"@@@SKIP_HEADER@@@\"" }, { "identifier": "SKIPPABLE_HEADERS", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIPPABLE_HEADERS = frozenset([\"accept-encoding\", \"host\", \"user-agent\"])" }, { "identifier": "body_to_chunks", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "def body_to_chunks(\n body: typing.Any | None, method: str, blocksize: int\n) -> ChunksAndContentLength:\n \"\"\"Takes the HTTP request method, body, and blocksize and\n transforms them into an iterable of chunks to pass to\n socket.sendall() and an optional 'Content-Length' header.\n\n A 'Content-Length' of 'None' indicates the length of the body\n can't be determined so should use 'Transfer-Encoding: chunked'\n for framing instead.\n \"\"\"\n\n chunks: typing.Iterable[bytes] | None\n content_length: int | None\n\n # No body, we need to make a recommendation on 'Content-Length'\n # based on whether that request method is expected to have\n # a body or not.\n if body is None:\n chunks = None\n if method.upper() not in _METHODS_NOT_EXPECTING_BODY:\n content_length = 0\n else:\n content_length = None\n\n # Bytes or strings become bytes\n elif isinstance(body, (str, bytes)):\n chunks = (to_bytes(body),)\n content_length = len(chunks[0])\n\n # File-like object, TODO: use seek() and tell() for length?\n elif hasattr(body, \"read\"):\n\n def chunk_readable() -> typing.Iterable[bytes]:\n nonlocal body, blocksize\n encode = isinstance(body, io.TextIOBase)\n while True:\n datablock = body.read(blocksize)\n if not datablock:\n break\n if encode:\n datablock = 
datablock.encode(\"iso-8859-1\")\n yield datablock\n\n chunks = chunk_readable()\n content_length = None\n\n # Otherwise we need to start checking via duck-typing.\n else:\n try:\n # Check if the body implements the buffer API.\n mv = memoryview(body)\n except TypeError:\n try:\n # Check if the body is an iterable\n chunks = iter(body)\n content_length = None\n except TypeError:\n raise TypeError(\n f\"'body' must be a bytes-like object, file-like \"\n f\"object, or iterable. Instead was {body!r}\"\n ) from None\n else:\n # Since it implements the buffer API can be passed directly to socket.sendall()\n chunks = (body,)\n content_length = mv.nbytes\n\n return ChunksAndContentLength(chunks=chunks, content_length=content_length)" }, { "identifier": "assert_fingerprint", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n if cert is None:\n raise SSLError(\"No certificate for the peer.\")\n\n fingerprint = fingerprint.replace(\":\", \"\").lower()\n digest_length = len(fingerprint)\n hashfunc = HASHFUNC_MAP.get(digest_length)\n if not hashfunc:\n raise SSLError(f\"Fingerprint of invalid length: {fingerprint}\")\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n cert_digest = hashfunc(cert).digest()\n\n if not hmac.compare_digest(cert_digest, fingerprint_bytes):\n raise SSLError(\n f'Fingerprints did not match. Expected \"{fingerprint}\", got \"{cert_digest.hex()}\"'\n )" }, { "identifier": "create_urllib3_context", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> ssl.SSLContext:\n \"\"\"Creates and configures an :class:`ssl.SSLContext` instance for use with urllib3.\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n\n This parameter is deprecated instead use 'ssl_minimum_version'.\n :param ssl_minimum_version:\n The minimum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n :param ssl_maximum_version:\n The maximum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n Not recommended to set to anything other than 'ssl.TLSVersion.MAXIMUM_SUPPORTED' which is the\n default value.\n :param cert_reqs:\n Whether to require the certificate verification. This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.\n :param ciphers:\n Which cipher suites to allow the server to select. 
Defaults to either system configured\n ciphers if OpenSSL 1.1.1+, otherwise uses a secure default set of ciphers.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n if SSLContext is None:\n raise TypeError(\"Can't create an SSLContext object without an ssl module\")\n\n # This means 'ssl_version' was specified as an exact value.\n if ssl_version not in (None, PROTOCOL_TLS, PROTOCOL_TLS_CLIENT):\n # Disallow setting 'ssl_version' and 'ssl_minimum|maximum_version'\n # to avoid conflicts.\n if ssl_minimum_version is not None or ssl_maximum_version is not None:\n raise ValueError(\n \"Can't specify both 'ssl_version' and either \"\n \"'ssl_minimum_version' or 'ssl_maximum_version'\"\n )\n\n # 'ssl_version' is deprecated and will be removed in the future.\n else:\n # Use 'ssl_minimum_version' and 'ssl_maximum_version' instead.\n ssl_minimum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MINIMUM_SUPPORTED\n )\n ssl_maximum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MAXIMUM_SUPPORTED\n )\n\n # This warning message is pushing users to use 'ssl_minimum_version'\n # instead of both min/max. Best practice is to only set the minimum version and\n # keep the maximum version to be it's default value: 'TLSVersion.MAXIMUM_SUPPORTED'\n warnings.warn(\n \"'ssl_version' option is deprecated and will be \"\n \"removed in urllib3 v2.1.0. Instead use 'ssl_minimum_version'\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n # PROTOCOL_TLS is deprecated in Python 3.10 so we always use PROTOCOL_TLS_CLIENT\n context = SSLContext(PROTOCOL_TLS_CLIENT)\n\n if ssl_minimum_version is not None:\n context.minimum_version = ssl_minimum_version\n else: # Python <3.10 defaults to 'MINIMUM_SUPPORTED' so explicitly set TLSv1.2 here\n context.minimum_version = TLSVersion.TLSv1_2\n\n if ssl_maximum_version is not None:\n context.maximum_version = ssl_maximum_version\n\n # Unless we're given ciphers defer to either system ciphers in\n # the case of OpenSSL 1.1.1+ or use our own secure default ciphers.\n if ciphers:\n context.set_ciphers(ciphers)\n\n # Setting the default here, as we may have no ssl module on import\n cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n # TLSv1.2 only. Unless set explicitly, do not request tickets.\n # This may save some bandwidth on wire, and although the ticket is encrypted,\n # there is a risk associated with it being on wire,\n # if the server is not rotating its ticketing keys properly.\n options |= OP_NO_TICKET\n\n context.options |= options\n\n # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is\n # necessary for conditional client cert authentication with TLS 1.3.\n # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older\n # versions of Python. 
We only enable if certificate verification is enabled to work\n # around Python issue #37428\n # See: https://bugs.python.org/issue37428\n if (\n cert_reqs == ssl.CERT_REQUIRED\n and getattr(context, \"post_handshake_auth\", None) is not None\n ):\n context.post_handshake_auth = True\n\n # The order of the below lines setting verify_mode and check_hostname\n # matter due to safe-guards SSLContext has to prevent an SSLContext with\n # check_hostname=True, verify_mode=NONE/OPTIONAL.\n # We always set 'check_hostname=False' for pyOpenSSL so we rely on our own\n # 'ssl.match_hostname()' implementation.\n if cert_reqs == ssl.CERT_REQUIRED and not IS_PYOPENSSL:\n context.verify_mode = cert_reqs\n context.check_hostname = True\n else:\n context.check_hostname = False\n context.verify_mode = cert_reqs\n\n try:\n context.hostname_checks_common_name = False\n except AttributeError: # Defensive: for CPython < 3.8.9 and 3.9.3; for PyPy < 7.3.8\n pass\n\n # Enable logging of TLS session keys via defacto standard environment variable\n # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.\n if hasattr(context, \"keylog_filename\"):\n sslkeylogfile = os.environ.get(\"SSLKEYLOGFILE\")\n if sslkeylogfile:\n context.keylog_filename = sslkeylogfile\n\n return context" }, { "identifier": "is_ipaddress", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def is_ipaddress(hostname: str | bytes) -> bool:\n \"\"\"Detects whether the hostname given is an IPv4 or IPv6 address.\n Also detects IPv6 addresses with Zone IDs.\n\n :param str hostname: Hostname to examine.\n :return: True if the hostname is an IP address, False otherwise.\n \"\"\"\n if isinstance(hostname, bytes):\n # IDN A-label bytes are ASCII compatible.\n hostname = hostname.decode(\"ascii\")\n return bool(_IPV4_RE.match(hostname) or _BRACELESS_IPV6_ADDRZ_RE.match(hostname))" }, { "identifier": "resolve_cert_reqs", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_REQUIRED`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbreviation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_REQUIRED\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"CERT_\" + candidate)\n return res # type: ignore[no-any-return]\n\n return candidate # type: ignore[return-value]" }, { "identifier": "resolve_ssl_version", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_ssl_version(candidate: None | int | str) -> int:\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_TLS\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"PROTOCOL_\" + candidate)\n return typing.cast(int, res)\n\n return candidate" }, { "identifier": "ssl_wrap_socket", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "@typing.overload\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str 
| None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\n ..." }, { "identifier": "CertificateError", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "class CertificateError(ValueError):\n pass" }, { "identifier": "match_hostname", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "def match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\n \"\"\"Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n returns nothing.\n \"\"\"\n if not cert:\n raise ValueError(\n \"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\"\n )\n try:\n # Divergence from upstream: ipaddress can't handle byte str\n #\n # The ipaddress module shipped with Python < 3.9 does not support\n # scoped IPv6 addresses so we unconditionally strip the Zone IDs for\n # now. Once we drop support for Python 3.9 we can remove this branch.\n if \"%\" in hostname:\n host_ip = ipaddress.ip_address(hostname[: hostname.rfind(\"%\")])\n else:\n host_ip = ipaddress.ip_address(hostname)\n\n except ValueError:\n # Not an IP address (common case)\n host_ip = None\n dnsnames = []\n san: tuple[tuple[str, str], ...] = cert.get(\"subjectAltName\", ())\n key: str\n value: str\n for key, value in san:\n if key == \"DNS\":\n if host_ip is None and _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n elif key == \"IP Address\":\n if host_ip is not None and _ipaddress_match(value, host_ip):\n return\n dnsnames.append(value)\n\n # We only check 'commonName' if it's enabled and we're not verifying\n # an IP address. IP addresses aren't valid within 'commonName'.\n if hostname_checks_common_name and host_ip is None and not dnsnames:\n for sub in cert.get(\"subject\", ()):\n for key, value in sub:\n if key == \"commonName\":\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n\n if len(dnsnames) > 1:\n raise CertificateError(\n \"hostname %r \"\n \"doesn't match either of %s\" % (hostname, \", \".join(map(repr, dnsnames)))\n )\n elif len(dnsnames) == 1:\n raise CertificateError(f\"hostname {hostname!r} doesn't match {dnsnames[0]!r}\")\n else:\n raise CertificateError(\"no appropriate subjectAltName fields were found\")" }, { "identifier": "Url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. 
Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" } ]
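The `HTTPHeaderDict` snippet in the context above documents case-insensitive lookup and the `|` / `|=` merge operators. A small usage sketch based only on the behavior documented in that snippet (assumes urllib3 v2 is installed; the header values are arbitrary):

from urllib3._collections import HTTPHeaderDict

base = HTTPHeaderDict({"Accept": "text/html"})
merged = base | {"User-Agent": "web-crawler/1.0"}  # | returns a new HTTPHeaderDict
print(merged["user-agent"])  # field names compare case-insensitively: 'web-crawler/1.0'
print("accept" in merged)    # True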
import datetime
import logging
import os
import re
import socket
import sys
import typing
import warnings
import ssl
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException as HTTPException  # noqa: F401
from http.client import ResponseNotReady
from socket import timeout as SocketTimeout
from typing import Literal
from .response import HTTPResponse
from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT
from .util.ssltransport import SSLTransport
from ._collections import HTTPHeaderDict
from .util.response import assert_header_parsing
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout
from .util.util import to_str
from .util.wait import wait_for_read
from ._base_connection import _TYPE_BODY
from ._base_connection import ProxyConfig as ProxyConfig
from ._base_connection import _ResponseOptions as _ResponseOptions
from ._version import __version__
from .exceptions import (
    ConnectTimeoutError,
    HeaderParsingError,
    NameResolutionError,
    NewConnectionError,
    ProxyError,
    SystemTimeWarning,
)
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_
from .util.request import body_to_chunks
from .util.ssl_ import assert_fingerprint as _assert_fingerprint
from .util.ssl_ import (
    create_urllib3_context,
    is_ipaddress,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .util.ssl_match_hostname import CertificateError, match_hostname
from .util.url import Url
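Among the utilities imported above, `body_to_chunks` decides between a known `Content-Length` and chunked framing, as its snippet in the context describes. A quick illustration of that behavior (a sketch assuming urllib3 v2; the payloads are arbitrary):

from urllib3.util.request import body_to_chunks

out = body_to_chunks(b"hello", method="POST", blocksize=8192)
print(out.content_length)  # 5, so a Content-Length header can be emitted
print(list(out.chunks))    # [b'hello']

streamed = body_to_chunks(iter([b"a", b"b"]), method="POST", blocksize=8192)
print(streamed.content_length)  # None, so chunked transfer encoding is needed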
15,841
( f"System time is way off (before {RECENT_DATE}). This will probably " "lead to SSL verification errors" ), SystemTimeWarning, ) sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock=sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, server_hostname=server_hostname, ssl_context=self.ssl_context, tls_in_tls=tls_in_tls, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) self.sock = sock_and_verified.socket self.is_verified = sock_and_verified.is_verified # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket: """ Establish a TLS connection to the proxy using the provided SSL context. """ # `_connect_tls_proxy` is called when self._tunnel_host is truthy. proxy_config = typing.cast(ProxyConfig, self.proxy_config) ssl_context = proxy_config.ssl_context sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=hostname, ssl_context=ssl_context, assert_hostname=proxy_config.assert_hostname, assert_fingerprint=proxy_config.assert_fingerprint, # Features that aren't implemented for proxies yet: cert_file=None, key_file=None, key_password=None, tls_in_tls=False, ) self.proxy_is_verified = sock_and_verified.is_verified return sock_and_verified.socket # type: ignore[return-value] class _WrappedAndVerifiedSocket(typing.NamedTuple): """ Wrapped socket and whether the connection is verified after the TLS handshake """ socket: ssl.SSLSocket | SSLTransport is_verified: bool def _ssl_wrap_socket_and_match_hostname( sock: socket.socket, *, cert_reqs: None | str | int, ssl_version: None | str | int, ssl_minimum_version: int | None, ssl_maximum_version: int | None, cert_file: str | None, key_file: str | None, key_password: str | None, ca_certs: str | None, ca_cert_dir: str | None, ca_cert_data: None | str | bytes, assert_hostname: None | str | Literal[False], assert_fingerprint: str | None, server_hostname: str | None, ssl_context: ssl.SSLContext | None, tls_in_tls: bool = False, ) -> _WrappedAndVerifiedSocket: """Logic for constructing an SSLContext from all TLS parameters, passing that down into ssl_wrap_socket, and then doing certificate verification either via hostname or fingerprint. This function exists to guarantee that both proxies and targets have the same behavior when connecting via TLS. 
""" default_ssl_context = False if ssl_context is None: default_ssl_context = True context = create_urllib3_context( ssl_version=resolve_ssl_version(ssl_version), ssl_minimum_version=ssl_minimum_version, ssl_maximum_version=ssl_maximum_version, cert_reqs=resolve_cert_reqs(cert_reqs), ) else: context = ssl_context context.verify_mode = resolve_cert_reqs(cert_reqs) # In some cases, we want to verify hostnames ourselves if ( # `ssl` can't verify fingerprints or alternate hostnames assert_fingerprint or assert_hostname # assert_hostname can be set to False to disable hostname checking or assert_hostname is False # We still support OpenSSL 1.0.2, which prevents us from verifying # hostnames easily: https://github.com/pyca/pyopenssl/pull/933
from __future__ import annotations if typing.TYPE_CHECKING: try: # Compiled with SSL? BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None # type: ignore[assignment] class BaseSSLError(BaseException): # type: ignore[no-redef] pass # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError BrokenPipeError = BrokenPipeError log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") _HAS_SYS_AUDIT = hasattr(sys, "audit") class HTTPConnection(_HTTPConnection): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) ] #: Whether this connection verifies the host's certificate. is_verified: bool = False #: Whether this proxy connection verified the proxy host's certificate. # If no proxy is currently connected to the value will be ``None``. proxy_is_verified: bool | None = None blocksize: int source_address: tuple[str, int] | None socket_options: connection._TYPE_SOCKET_OPTIONS | None _has_connected_to_proxy: bool _response_options: _ResponseOptions | None _tunnel_host: str | None _tunnel_port: int | None _tunnel_scheme: str | None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, ) -> None: super().__init__( host=host, port=port, timeout=Timeout.resolve_default_timeout(timeout), source_address=source_address, blocksize=blocksize, ) self.socket_options = socket_options self.proxy = proxy self.proxy_config = proxy_config self._has_connected_to_proxy = False self._response_options = None self._tunnel_host: str | None = None self._tunnel_port: int | None = None self._tunnel_scheme: str | None = None # https://github.com/python/mypy/issues/4125 # Mypy treats this as LSP violation, which is considered a bug. # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one. 
# However, there is also a `host` setter so LSP is not violated. # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed. @property def host(self) -> str: """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip(".") @host.setter def host(self, value: str) -> None: """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value def _new_conn(self) -> socket.socket: """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ try: sock = connection.create_connection( (self._dns_host, self.port), self.timeout, source_address=self.source_address, socket_options=self.socket_options, ) except socket.gaierror as e: raise NameResolutionError(self.host, self, e) from e except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except OSError as e: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e # Audit hooks are only available in Python 3.8+ if _HAS_SYS_AUDIT: sys.audit("http.client.connect", self, self.host, self.port) return sock def set_tunnel( self, host: str, port: int | None = None, headers: typing.Mapping[str, str] | None = None, scheme: str = "http", ) -> None: if scheme not in ("http", "https"): raise ValueError( f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'" ) super().set_tunnel(host, port=port, headers=headers) self._tunnel_scheme = scheme def connect(self) -> None: self.sock = self._new_conn() if self._tunnel_host: # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # type: ignore[attr-defined] # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) @property def is_closed(self) -> bool: return self.sock is None @property def is_connected(self) -> bool: if self.sock is None: return False return not wait_for_read(self.sock, timeout=0.0) @property def has_connected_to_proxy(self) -> bool: return self._has_connected_to_proxy def close(self) -> None: try: super().close() finally: # Reset all stateful properties so connection # can be re-used without leaking prior configs. 
self.sock = None self.is_verified = False self.proxy_is_verified = None self._has_connected_to_proxy = False self._response_options = None self._tunnel_host = None self._tunnel_port = None self._tunnel_scheme = None def putrequest( self, method: str, url: str, skip_host: bool = False, skip_accept_encoding: bool = False, ) -> None: """""" # Empty docstring because the indentation of CPython's implementation # is broken but we don't want this method in our documentation. match = _CONTAINS_CONTROL_CHAR_RE.search(method) if match: raise ValueError( f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})" ) return super().putrequest( method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding ) def putheader(self, header: str, *values: str) -> None: """""" if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): super().putheader(header, *values) elif to_str(header.lower()) not in SKIPPABLE_HEADERS: skippable_headers = "', '".join( [str.title(header) for header in sorted(SKIPPABLE_HEADERS)] ) raise ValueError( f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'" ) # `request` method's signature intentionally violates LSP. # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental. def request( # type: ignore[override] self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, *, chunked: bool = False, preload_content: bool = True, decode_content: bool = True, enforce_content_length: bool = True, ) -> None: # Update the inner socket's timeout value to send the request. # This only triggers if the connection is re-used. if self.sock is not None: self.sock.settimeout(self.timeout) # Store these values to be fed into the HTTPResponse # object later. TODO: Remove this in favor of a real # HTTP lifecycle mechanism. # We have to store these before we call .request() # because sometimes we can still salvage a response # off the wire even if we aren't able to completely # send the request body. self._response_options = _ResponseOptions( request_method=method, request_url=url, preload_content=preload_content, decode_content=decode_content, enforce_content_length=enforce_content_length, ) if headers is None: headers = {} header_keys = frozenset(to_str(k.lower()) for k in headers) skip_accept_encoding = "accept-encoding" in header_keys skip_host = "host" in header_keys self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) # Transform the body into an iterable of sendall()-able chunks # and detect if an explicit Content-Length is doable. chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize) chunks = chunks_and_cl.chunks content_length = chunks_and_cl.content_length # When chunked is explicit set to 'True' we respect that. if chunked: if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") else: # Detect whether a framing mechanism is already in use. If so # we respect that value, otherwise we pick chunked vs content-length # depending on the type of 'body'. if "content-length" in header_keys: chunked = False elif "transfer-encoding" in header_keys: chunked = True # Otherwise we go off the recommendation of 'body_to_chunks()'. 
else: chunked = False if content_length is None: if chunks is not None: chunked = True self.putheader("Transfer-Encoding", "chunked") else: self.putheader("Content-Length", str(content_length)) # Now that framing headers are out of the way we send all the other headers. if "user-agent" not in header_keys: self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) self.endheaders() # If we're given a body we start sending that in chunks. if chunks is not None: for chunk in chunks: # Sending empty chunks isn't allowed for TE: chunked # as it indicates the end of the body. if not chunk: continue if isinstance(chunk, str): chunk = chunk.encode("utf-8") if chunked: self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk)) else: self.send(chunk) # Regardless of whether we have a body or not, if we're in # chunked mode we want to send an explicit empty chunk. if chunked: self.send(b"0\r\n\r\n") def request_chunked( self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, ) -> None: """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ warnings.warn( "HTTPConnection.request_chunked() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).", category=DeprecationWarning, stacklevel=2, ) self.request(method, url, body=body, headers=headers, chunked=True) def getresponse( # type: ignore[override] self, ) -> HTTPResponse: """ Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # Raise the same error as http.client.HTTPConnection if self._response_options is None: raise ResponseNotReady() # Reset this attribute for being used again. resp_options = self._response_options self._response_options = None # Since the connection's timeout value may have been updated # we need to set the timeout on the socket. self.sock.settimeout(self.timeout) # This is needed here to avoid circular import errors # Get the response from http.client.HTTPConnection httplib_response = super().getresponse() try: assert_header_parsing(httplib_response.msg) except (HeaderParsingError, TypeError) as hpe: log.warning( "Failed to parse headers (url=%s): %s", _url_from_connection(self, resp_options.request_url), hpe, exc_info=True, ) headers = HTTPHeaderDict(httplib_response.msg.items()) response = HTTPResponse( body=httplib_response, headers=headers, status=httplib_response.status, version=httplib_response.version, reason=httplib_response.reason, preload_content=resp_options.preload_content, decode_content=resp_options.decode_content, original_response=httplib_response, enforce_content_length=resp_options.enforce_content_length, request_method=resp_options.request_method, request_url=resp_options.request_url, ) return response class HTTPSConnection(HTTPConnection): """ Many of the parameters to this constructor are passed to the underlying SSL socket by means of :py:func:`urllib3.util.ssl_wrap_socket`. 
""" default_port = port_by_scheme["https"] # type: ignore[misc] cert_reqs: int | str | None = None ca_certs: str | None = None ca_cert_dir: str | None = None ca_cert_data: None | str | bytes = None ssl_version: int | str | None = None ssl_minimum_version: int | None = None ssl_maximum_version: int | None = None assert_fingerprint: str | None = None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, cert_reqs: int | str | None = None, assert_hostname: None | str | Literal[False] = None, assert_fingerprint: str | None = None, server_hostname: str | None = None, ssl_context: ssl.SSLContext | None = None, ca_certs: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ssl_minimum_version: int | None = None, ssl_maximum_version: int | None = None, ssl_version: int | str | None = None, # Deprecated cert_file: str | None = None, key_file: str | None = None, key_password: str | None = None, ) -> None: super().__init__( host, port=port, timeout=timeout, source_address=source_address, blocksize=blocksize, socket_options=socket_options, proxy=proxy, proxy_config=proxy_config, ) self.key_file = key_file self.cert_file = cert_file self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ssl_version = ssl_version self.ssl_minimum_version = ssl_minimum_version self.ssl_maximum_version = ssl_maximum_version self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data # cert_reqs depends on ssl_context so calculate last. if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.cert_reqs = cert_reqs def set_cert( self, key_file: str | None = None, cert_file: str | None = None, cert_reqs: int | str | None = None, key_password: str | None = None, ca_certs: str | None = None, assert_hostname: None | str | Literal[False] = None, assert_fingerprint: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ) -> None: """ This method should only be called once, before the connection is used. """ warnings.warn( "HTTPSConnection.set_cert() is deprecated and will be removed " "in urllib3 v2.1.0. Instead provide the parameters to the " "HTTPSConnection constructor.", category=DeprecationWarning, stacklevel=2, ) # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also # have an SSLContext object in which case we'll use its verify_mode. 
if cert_reqs is None: if self.ssl_context is not None: cert_reqs = self.ssl_context.verify_mode else: cert_reqs = resolve_cert_reqs(None) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.key_password = key_password self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data def connect(self) -> None: sock: socket.socket | ssl.SSLSocket self.sock = sock = self._new_conn() server_hostname: str = self.host tls_in_tls = False # Do we need to establish a tunnel? if self._tunnel_host is not None: # We're tunneling to an HTTPS origin so need to do TLS-in-TLS. if self._tunnel_scheme == "https": self.sock = sock = self._connect_tls_proxy(self.host, sock) tls_in_tls = True # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True self._tunnel() # type: ignore[attr-defined] # Override the host with the one we're requesting data from. server_hostname = self._tunnel_host if self.server_hostname is not None: server_hostname = self.server_hostname is_time_off = datetime.date.today() < RECENT_DATE if is_time_off: warnings.warn( ( f"System time is way off (before {RECENT_DATE}). This will probably " "lead to SSL verification errors" ), SystemTimeWarning, ) sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock=sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, cert_file=self.cert_file, key_file=self.key_file, key_password=self.key_password, server_hostname=server_hostname, ssl_context=self.ssl_context, tls_in_tls=tls_in_tls, assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint, ) self.sock = sock_and_verified.socket self.is_verified = sock_and_verified.is_verified # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket: """ Establish a TLS connection to the proxy using the provided SSL context. """ # `_connect_tls_proxy` is called when self._tunnel_host is truthy. 
proxy_config = typing.cast(ProxyConfig, self.proxy_config) ssl_context = proxy_config.ssl_context sock_and_verified = _ssl_wrap_socket_and_match_hostname( sock, cert_reqs=self.cert_reqs, ssl_version=self.ssl_version, ssl_minimum_version=self.ssl_minimum_version, ssl_maximum_version=self.ssl_maximum_version, ca_certs=self.ca_certs, ca_cert_dir=self.ca_cert_dir, ca_cert_data=self.ca_cert_data, server_hostname=hostname, ssl_context=ssl_context, assert_hostname=proxy_config.assert_hostname, assert_fingerprint=proxy_config.assert_fingerprint, # Features that aren't implemented for proxies yet: cert_file=None, key_file=None, key_password=None, tls_in_tls=False, ) self.proxy_is_verified = sock_and_verified.is_verified return sock_and_verified.socket # type: ignore[return-value] class _WrappedAndVerifiedSocket(typing.NamedTuple): """ Wrapped socket and whether the connection is verified after the TLS handshake """ socket: ssl.SSLSocket | SSLTransport is_verified: bool def _ssl_wrap_socket_and_match_hostname( sock: socket.socket, *, cert_reqs: None | str | int, ssl_version: None | str | int, ssl_minimum_version: int | None, ssl_maximum_version: int | None, cert_file: str | None, key_file: str | None, key_password: str | None, ca_certs: str | None, ca_cert_dir: str | None, ca_cert_data: None | str | bytes, assert_hostname: None | str | Literal[False], assert_fingerprint: str | None, server_hostname: str | None, ssl_context: ssl.SSLContext | None, tls_in_tls: bool = False, ) -> _WrappedAndVerifiedSocket: """Logic for constructing an SSLContext from all TLS parameters, passing that down into ssl_wrap_socket, and then doing certificate verification either via hostname or fingerprint. This function exists to guarantee that both proxies and targets have the same behavior when connecting via TLS. """ default_ssl_context = False if ssl_context is None: default_ssl_context = True context = create_urllib3_context( ssl_version=resolve_ssl_version(ssl_version), ssl_minimum_version=ssl_minimum_version, ssl_maximum_version=ssl_maximum_version, cert_reqs=resolve_cert_reqs(cert_reqs), ) else: context = ssl_context context.verify_mode = resolve_cert_reqs(cert_reqs) # In some cases, we want to verify hostnames ourselves if ( # `ssl` can't verify fingerprints or alternate hostnames assert_fingerprint or assert_hostname # assert_hostname can be set to False to disable hostname checking or assert_hostname is False # We still support OpenSSL 1.0.2, which prevents us from verifying # hostnames easily: https://github.com/pyca/pyopenssl/pull/933
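As a small illustration of the chunked transfer-encoding framing that HTTPConnection.request() above sends on the wire (a sketch, not code from the row):

def frame_chunks(chunks):
    # Each chunk becomes "<hex length>\r\n<data>\r\n"; a zero-length
    # chunk terminates the body, so empty inputs are skipped.
    for chunk in chunks:
        if not chunk:
            continue
        if isinstance(chunk, str):
            chunk = chunk.encode("utf-8")
        yield b"%x\r\n%b\r\n" % (len(chunk), chunk)
    yield b"0\r\n\r\n"  # explicit final chunk

assert b"".join(frame_chunks(["hi", b""])) == b"2\r\nhi\r\n0\r\n\r\n"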
or ssl_.IS_PYOPENSSL
18
2023-12-16 04:12:01+00:00
24k
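Taken together, the row above describes urllib3's low-level HTTPSConnection. A hedged end-to-end sketch of driving it directly (the hostname is a placeholder, and most applications would go through urllib3.PoolManager instead):

from urllib3.connection import HTTPSConnection

conn = HTTPSConnection("example.com", 443, timeout=10.0)
conn.request("GET", "/")
resp = conn.getresponse()
print(resp.status, resp.headers.get("Content-Type"))
conn.close()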
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
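The stats_manager snippets above define a per-frame metric store keyed by frame number, with int/float values only. A minimal usage sketch under that API, assuming the backend.scenedetect import paths shown in this record and a hypothetical metric key 'content_val':

from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.stats_manager import FrameMetricRegistered, StatsManager

# Base timecode is required for save_to_csv timing calculations.
stats = StatsManager(base_timecode=FrameTimecode(0, fps=30.0))
try:
    stats.register_metrics(['content_val'])
except FrameMetricRegistered:
    pass  # Already registered; acceptable only for read-only access.

# Values must be int or float, per the class docstring.
for frame_number, value in enumerate([0.0, 12.5, 3.2]):
    stats.set_metrics(frame_number, {'content_val': value})

assert stats.metrics_exist(1, ['content_val'])
print(stats.get_metrics(1, ['content_val']))  # -> [12.5]

if stats.is_save_required():
    stats.save_to_csv('stats.csv')  # Accepts a path or an open file handle.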
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14698
'%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. 
Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)
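The truncated prefix above ends inside save_images, just before the file name template is built. A sketch of how the three exporters shown in this record fit together, assuming the module resolves as backend.scenedetect.scene_manager and that the dict returned by save_images is indexable by scene index, as write_scene_list_html expects; output file names are hypothetical:

from backend.scenedetect.scene_manager import (save_images, write_scene_list,
                                               write_scene_list_html)

def export_scene_reports(video, scene_list, out_dir='out'):
    """Sketch: write per-scene images, a CSV listing, and an HTML report."""
    image_paths = save_images(scene_list, video, num_images=3,
                              image_extension='jpg', output_dir=out_dir)
    with open('scenes.csv', 'w') as csv_file:
        write_scene_list(csv_file, scene_list, include_cut_list=False)
    write_scene_list_html('scenes.html', scene_list,
                          image_filenames=image_paths,  # {scene index: [paths]}
                          image_width=320, image_height=180)

Here `video` and `scene_list` are the VideoStream and detected (start, end) pairs produced by a SceneManager, as the module docstring in the full file below describes.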
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True)
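get_scenes_from_cuts and compute_downscale_factor above are small pure helpers, so a short worked sketch pins down their behavior; the 10 FPS base timecode and cut frames are made up for illustration:

from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.scene_manager import (compute_downscale_factor,
                                               get_scenes_from_cuts)

base = FrameTimecode(0, fps=10.0)
cuts = [base + 30, base + 70]  # Hypothetical cuts at frames 30 and 70.
scenes = get_scenes_from_cuts(cut_list=cuts, start_pos=base, end_pos=base + 100)
# Contiguous (start, end) FrameTimecode pairs spanning the processed range:
# frames (0, 30), (30, 70), (70, 100).

# A 1920-px-wide frame downscaled toward the 256 px default minimum width:
assert compute_downscale_factor(1920) == 1920 // 256  # == 7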
filename_template = Template(image_name_template)
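The gold next line above wraps the image name pattern in Template, which scene_manager imports from backend.scenedetect.platform. As a stand-in, the sketch below uses Python's string.Template to show the macro expansion documented for image_name_template; the substitute call and values are assumptions about how the names are later built:

from string import Template  # Stand-in for backend.scenedetect.platform.Template

image_name_template = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER'
filename_template = Template(image_name_template)

# Hypothetical expansion for scene 2, image 1 of a clip named 'demo':
name = filename_template.safe_substitute(VIDEO_NAME='demo',
                                         SCENE_NUMBER='002',
                                         IMAGE_NUMBER='01')
print(name + '.jpg')  # demo-Scene-002-01.jpg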
5
2023-10-25 02:50:01+00:00
24k
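That closes the first record. A hedged sketch of how a record like this, pairing a truncated code prefix with the gold continuation shown above, might be scored for next-line prediction; predict() and the prompt layout are hypothetical stand-ins, not anything the dump specifies:

def exact_match(prediction: str, gold: str) -> bool:
    # Whitespace-insensitive comparison against the gold next line.
    return prediction.strip() == gold.strip()

GOLD_NEXT_LINE = 'filename_template = Template(image_name_template)'

def predict(prompt: str) -> str:
    # Hypothetical model call; hard-coded so the sketch is self-contained.
    return '    filename_template = Template(image_name_template)'

prompt = '<context snippets>\n<import block>\n<cropped code>'  # assumed layout
print(exact_match(predict(prompt), GOLD_NEXT_LINE))  # True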
EulerSearch/embedding_studio
plugins/default_fine_tuning_method.py
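The second record targets a fine-tuning plugin module. Grounded in the FineTuningMethod ABC and PluginMeta model quoted in the context snippets that follow, this is a minimal skeleton such a plugin might take; the class name and meta values are hypothetical:

from typing import List

from embedding_studio.core.plugin import FineTuningMethod
from embedding_studio.models.clickstream.sessions import SessionWithEvents
from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta


class ExampleFineTuningMethod(FineTuningMethod):
    # Hypothetical metadata; PluginMeta only requires `name`.
    meta = PluginMeta(name='example_fine_tuning_method', version='0.0.1',
                      description='Sketch of a fine-tuning plugin.')

    def upload_initial_model(self) -> None:
        # A real plugin would push its starting model to the tracker here,
        # e.g. via ExperimentsManager.upload_initial_model (quoted below).
        raise NotImplementedError

    def get_fine_tuning_builder(
        self, clickstream: List[SessionWithEvents]
    ) -> FineTuningBuilder:
        # A real plugin assembles data loaders, splitters, settings, and
        # ranking data into a FineTuningBuilder; omitted in this sketch.
        raise NotImplementedError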
[ { "identifier": "settings", "path": "embedding_studio/core/config.py", "snippet": "class Settings(BaseSettings):\n API_V1_STR: str = \"/api/v1\"\n SECRET_KEY: str = secrets.token_urlsafe(32)\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = []\n FINETUNING_MONGO_HOST: str = os.getenv(\"FINETUNING_MONGO_HOST\", \"mongo\")\n FINETUNING_MONGO_PORT: int = os.getenv(\"FINETUNING_MONGO_PORT\", 27017)\n FINETUNING_MONGO_DB_NAME: str = os.getenv(\n \"FINETUNING_MONGO_DB_NAME\", \"embedding_studio\"\n )\n FINETUNING_MONGO_USERNAME: str = os.getenv(\n \"FINETUNING_MONGO_USERNAME\", \"root\"\n )\n FINETUNING_MONGO_PASSWORD: str = os.getenv(\n \"FINETUNING_MONGO_PASSWORD\", \"mongopassword\"\n )\n FINETUNING_MONGO_URL: str = (\n f\"mongodb://{FINETUNING_MONGO_USERNAME}:{FINETUNING_MONGO_PASSWORD}@\"\n f\"{FINETUNING_MONGO_HOST}:{FINETUNING_MONGO_PORT}\"\n )\n CLICKSTREAM_MONGO_HOST: str = os.getenv(\"CLICKSTREAM_MONGO_HOST\", \"mongo\")\n CLICKSTREAM_MONGO_PORT: int = os.getenv(\"CLICKSTREAM_MONGO_PORT\", 27017)\n CLICKSTREAM_MONGO_DB_NAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_DB_NAME\", \"embedding_studio\"\n )\n CLICKSTREAM_MONGO_USERNAME: str = os.getenv(\n \"CLICKSTREAM_MONGO_USERNAME\", \"root\"\n )\n CLICKSTREAM_MONGO_PASSWORD: str = os.getenv(\n \"CLICKSTREAM_MONGO_PASSWORD\", \"mongopassword\"\n )\n CLICKSTREAM_MONGO_URL: str = (\n f\"mongodb://{CLICKSTREAM_MONGO_USERNAME}:{CLICKSTREAM_MONGO_PASSWORD}@\"\n f\"{CLICKSTREAM_MONGO_HOST}:{CLICKSTREAM_MONGO_PORT}\"\n )\n REDIS_HOST: str = os.getenv(\"REDIS_HOST\", \"localhost\")\n REDIS_PORT: int = os.getenv(\"REDIS_PORT\", 6379)\n REDIS_PASSWORD: str = os.getenv(\"REDIS_PASSWORD\", \"redispassword\")\n REDIS_URL: str = f\"redis://{REDIS_HOST}:{REDIS_PORT}/0\"\n MINIO_HOST: str = os.getenv(\"MINIO_HOST\", \"localhost\")\n MINIO_PORT: int = os.getenv(\"MINIO_PORT\", 9000)\n MINIO_ROOT_USER: str = os.getenv(\"MINIO_ROOT_USER\", \"root\")\n MINIO_ROOT_PASSWORD: str = os.getenv(\n \"MINIO_ROOT_PASSWORD\", \"miniopassword\"\n )\n MINIO_DEFAULT_BUCKETS: str = os.getenv(\n \"MINIO_DEFAULT_BUCKETS\", \"embeddingstudio\"\n )\n MINIO_ACCESS_KEY: str = os.getenv(\n \"MINIO_ACCESS_KEY\", \"mtGNiEvoTL6C0EXAMPLE\"\n )\n MINIO_SECRET_KEY: str = os.getenv(\n \"MINIO_SECRET_KEY\", \"HY5JserXAaWmphNyCpQPEXAMPLEKEYEXAMPLEKEY\"\n )\n MYSQL_HOST: str = os.getenv(\"MYSQL_HOST\", \"localhost\")\n MYSQL_PORT: int = os.getenv(\"MYSQL_PORT\", 3306)\n MYSQL_DATABASE: str = os.getenv(\"MYSQL_DATABASE\", \"mlflow\")\n MYSQL_USER: str = os.getenv(\"MYSQL_USER\", \"mlflow_user\")\n MYSQL_PASSWORD: str = os.getenv(\"MYSQL_PASSWORD\", \"Baxp3O5rUvpIxiD77BfZ\")\n MYSQL_ROOT_PASSWORD: str = os.getenv(\n \"MYSQL_ROOT_PASSWORD\", \"PrK5qmPTDsm2IYKvHVG8\"\n )\n MLFLOW_HOST: str = os.getenv(\"MLFLOW_HOST\", \"localhost\")\n MLFLOW_PORT: int = os.getenv(\"MLFLOW_PORT\", 5001)\n MLFLOW_TRACKING_URI: str = f\"http://{MLFLOW_HOST}:{MLFLOW_PORT}\"\n ES_PLUGINS_PATH: str = os.getenv(\"ES_PLUGINS_PATH\", \"plugins\")\n FINE_TUNING_WORKER_MAX_RETRIES: int = os.getenv(\n \"FINE_TUNING_WORKER_MAX_RETRIES\", 3\n )\n FINE_TUNING_WORKER_TIME_LIMIT: int = os.getenv(\n \"FINE_TUNING_WORKER_TIME_LIMIT\", 18000000\n )\n DEFAULT_MAX_ATTEMPTS: int = os.getenv(\"DEFAULT_MAX_ATTEMPTS\", 3)\n DEFAULT_WAIT_TIME_SECONDS: float = os.getenv(\n \"DEFAULT_WAIT_TIME_SECONDS\", 3.0\n )\n S3_READ_CREDENTIALS_ATTEMPTS: int = os.getenv(\n \"S3_READ_CREDENTIALS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_READ_WAIT_TIME_SECONDS: float = os.getenv(\n 
\"S3_READ_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n S3_DOWNLOAD_DATA_ATTEMPTS: int = os.getenv(\n \"S3_DOWNLOAD_DATA_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS: float = os.getenv(\n \"S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_METRIC_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_METRIC_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_PARAM_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_PARAM_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOG_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOG_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_LOAD_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_LOAD_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_DELETE_MODEL_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_MODEL_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_RUNS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_RUNS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_END_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_END_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_END_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_END_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_RUN_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_RUN_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_RUN_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_RUN_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS: int = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS\",\n DEFAULT_WAIT_TIME_SECONDS,\n )\n MLFLOW_DELETE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_CREATE_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n MLFLOW_GET_EXPERIMENT_ATTEMPTS: int = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_ATTEMPTS\", DEFAULT_MAX_ATTEMPTS\n )\n MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS: float = os.getenv(\n \"MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS\", DEFAULT_WAIT_TIME_SECONDS\n )\n CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_MINUS_SEC\", 12 * 60 * 60\n )\n CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC: int = os.getenv(\n \"CLICKSTREAM_TIME_MAX_DELTA_PLUS_SEC\", 5 * 60\n )" }, { "identifier": "FineTuningMethod", "path": "embedding_studio/core/plugin.py", "snippet": "class 
FineTuningMethod(ABC):\n \"\"\"Base class (plugin) for fine-tuning methods.\n\n All fine-tuning methods must inherit from this class.\n \"\"\"\n\n meta: PluginMeta\n\n @abstractmethod\n def upload_initial_model(self) -> None:\n \"\"\"Upload the initial model to the storage.\n\n Method that should be implemented by subclasses to upload the\n initial model to the storage.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses must implement upload_initial_model\"\n )\n\n @abstractmethod\n def get_fine_tuning_builder(\n self, clickstream: List[SessionWithEvents]\n ) -> FineTuningBuilder:\n \"\"\"Return a FineTuningBuilder instance for the fine-tuning process.\n\n Method that should be implemented by subclasses to provide a\n FineTuningBuilder instance.\n\n :param clickstream: Collection of user feedback, used to enhance\n the model.\n :return: An instance of FineTuningBuilder used for\n launching the fine-tuning process.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses must implement get_fine_tuning_builder\"\n )" }, { "identifier": "AWSS3ClickstreamParser", "path": "embedding_studio/embeddings/data/clickstream/parsers/s3_parser.py", "snippet": "class AWSS3ClickstreamParser(ClickstreamParser):\n def __init__(\n self, query_item_type: type, search_result_type: type, event_type: type\n ):\n super(AWSS3ClickstreamParser, self).__init__(\n query_item_type, search_result_type, S3FileMeta, event_type\n )" }, { "identifier": "DummyEventType", "path": "embedding_studio/embeddings/data/clickstream/search_event.py", "snippet": "class DummyEventType(EventType):\n importance: float\n\n @property\n def event_importance(self) -> float:\n return self.importance" }, { "identifier": "SearchResult", "path": "embedding_studio/embeddings/data/clickstream/search_event.py", "snippet": "class SearchResult(BaseModel):\n item: ItemMeta\n is_click: bool\n rank: Optional[float] = None\n event_type: Optional[EventType] = None\n timestamp: Optional[int] = None\n\n @validator(\"event_type\", pre=True, always=True)\n def validate_event_type(cls, value, values):\n if value is not None and not isinstance(value, EventType):\n raise ValueError(\"Invalid event_type instance\")\n return value\n\n class Config:\n arbitrary_types_allowed = True\n\n @classmethod\n def from_mongo(\n cls,\n result: SearchResultItem,\n event_ids: Set[str],\n item_type: type,\n event_type: type,\n ) -> \"SearchResult\":\n event_instance = DummyEventType(importance=1)\n\n return cls(\n item=item_type(**result.meta),\n is_click=result.object_id in event_ids,\n event_type=event_instance,\n timestamp=None,\n )\n\n @classmethod\n def from_dict(\n cls, data: dict, item_type: type, event_type: type\n ) -> \"SearchResult\":\n event_data: Optional[Dict] = data.get(\"event_type\")\n event_instance = None\n\n if event_data is not None:\n event_instance = event_type(**event_data)\n\n return cls(\n item=item_type(**data[\"item\"]),\n is_click=data[\"is_click\"],\n rank=data[\"rank\"],\n event_type=event_instance,\n timestamp=int(data.get(\"timestamp\")),\n )" }, { "identifier": "ClickstreamSessionsSplitter", "path": "embedding_studio/embeddings/data/clickstream/splitter.py", "snippet": "class ClickstreamSessionsSplitter:\n def __init__(\n self,\n test_size_ratio: float = 0.2,\n shuffle: bool = True,\n random_state: Optional[int] = None,\n ):\n \"\"\"Generate train / test clickstream sessions split.\n\n :param test_size_ratio: ratio of test split size (default: 0.2)\n :param shuffle: to shuffle or not paired clickstream sessions (default: True)\n :param 
random_state: random state to sklearn splitter (default: None)\n \"\"\"\n if (\n not isinstance(test_size_ratio, float)\n or test_size_ratio <= 0\n or test_size_ratio >= 1.0\n ):\n raise ValueError(\n f\"test_size_ration is a numeric value in range (0.0, 1.0)\"\n )\n\n if test_size_ratio >= 0.5:\n logger.warning(\n \"test_size_ration is larger than 0.5. It's unusual for ML to have test size > train size.\"\n )\n\n self._test_size_ratio = test_size_ratio\n\n if not isinstance(shuffle, bool):\n raise ValueError(\"shuffle should be boolean\")\n self._shuffle = shuffle\n self._random_state = random_state\n\n @property\n def shuffle(self) -> bool:\n return self._shuffle\n\n def split(self, sessions: List[ClickstreamSession]) -> DatasetDict:\n \"\"\"Split clickstream sessions.\n\n :param sessions: sessions to be split\n :return: train / test splits accordingly (PairedClickstreamDataset)\n \"\"\"\n # Get all IDs\n all_result_ids: Set[str] = set()\n for session in sessions:\n all_result_ids.update(session.results)\n\n if len(all_result_ids) == 0:\n raise ValueError(\"Sessions list is empty\")\n\n # Ensure a minimum number of unique result IDs in each set\n min_unique_test_sessions: int = int(\n self._test_size_ratio * len(sessions)\n )\n\n # Split the result IDs into train and test sets\n train_result_ids, test_result_ids = train_test_split(\n list(all_result_ids),\n test_size=self._test_size_ratio,\n random_state=self._random_state,\n )\n test_result_ids: Set[str] = set(test_result_ids)\n\n # Split sessions into train and test based on result IDs\n train_sessions: List[ClickstreamSession] = []\n test_sessions: List[ClickstreamSession] = []\n\n for session in sessions:\n if len(session.results) == 0:\n continue\n\n if (\n len(set(session.results) & test_result_ids)\n / len(session.results)\n <= 0.5\n ):\n # If less than 50% of result IDs intersect with the test set, add to the train set\n train_sessions.append(session)\n else:\n test_sessions.append(session)\n\n if len(test_sessions) < min_unique_test_sessions:\n logger.warning(\n f\"Clickstream sessions intersects highly, so they are not split well\"\n )\n random_train_session_indexess: List[int] = random.choices(\n list(range(len(train_sessions))),\n k=min_unique_test_sessions - len(test_sessions),\n )\n for i in reversed(sorted(random_train_session_indexess)):\n test_sessions.append(train_sessions.pop(i))\n\n if len(test_sessions) + len(train_sessions) < len(sessions):\n missed_sessions_count = len(sessions) - (\n len(test_sessions) + len(train_sessions)\n )\n logger.warning(\n f\"Clickstream sessions weren't split correctly, add {missed_sessions_count} more sessions to the train split.\"\n )\n\n for session in sessions:\n if (\n session not in train_sessions\n and session not in test_sessions\n ):\n train_sessions.append(session)\n\n return DatasetDict(\n {\n \"train\": PairedClickstreamDataset(\n train_sessions, self.shuffle\n ),\n \"test\": PairedClickstreamDataset(test_sessions, self.shuffle),\n }\n )" }, { "identifier": "TextQueryItem", "path": "embedding_studio/embeddings/data/clickstream/text_query_item.py", "snippet": "class TextQueryItem(QueryItem):\n text: str\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "TextQueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/text_query_retriever.py", "snippet": "class TextQueryRetriever(QueryRetriever):\n def __call__(self, query: TextQueryItem) -> str:\n if not hasattr(query, \"text\"):\n raise ValueError(f\"Query object does not have text attribute\")\n 
return query.text" }, { "identifier": "AWSS3DataLoader", "path": "embedding_studio/embeddings/data/loaders/s3/s3_loader.py", "snippet": "class AWSS3DataLoader(DataLoader):\n def __init__(self, retry_config: Optional[RetryConfig] = None, **kwargs):\n \"\"\"Items loader from AWS S3.\n\n :param max_attempts: maximum number of attempts (default: 10)\n :param wait_time_seconds: time to wait between (default: 10)\n :param kwargs: dict data for AWSS3Credentials\n \"\"\"\n super(AWSS3DataLoader, self).__init__(**kwargs)\n self.retry_config = (\n retry_config\n if retry_config\n else AWSS3DataLoader._get_default_retry_config()\n )\n self.credentials = AWSS3Credentials(**kwargs)\n self.attempt_exception_types = [EndpointConnectionError]\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"credentials\"] = RetryParams(\n max_attempts=settings.S3_READ_CREDENTIALS_ATTEMPTS,\n wait_time_seconds=settings.S3_READ_WAIT_TIME_SECONDS,\n )\n config[\"download_data\"] = RetryParams(\n max_attempts=settings.S3_DOWNLOAD_DATA_ATTEMPTS,\n wait_time_seconds=settings.S3_DOWNLOAD_DATA_WAIT_TIME_SECONDS,\n )\n return config\n\n @retry_method(name=\"download_data\")\n def _read_from_s3(self, client, bucket: str, file: str) -> Image:\n return read_from_s3(client, bucket, file)\n\n @retry_method(name=\"credentials\")\n def _get_client(self, task_id: str):\n if (\n self.credentials.aws_access_key_id is None\n or self.credentials.aws_secret_access_key is None\n ) and not self.credentials.use_system_info:\n logger.warning(\n \"No specific AWS credentials, use Anonymous session\"\n )\n s3_client = boto3.client(\n \"s3\", config=Config(signature_version=UNSIGNED)\n )\n else:\n sts_client = boto3.client(\n \"sts\",\n aws_access_key_id=self.credentials.aws_access_key_id,\n aws_secret_access_key=self.credentials.aws_secret_access_key,\n )\n if self.credentials.external_id:\n assumed_role_object = sts_client.assume_role(\n RoleArn=self.credentials.role_arn,\n RoleSessionName=task_id,\n ExternalId=self.credentials.external_id,\n )\n else:\n assumed_role_object = sts_client.assume_role(\n RoleArn=self.credentials.role_arn,\n RoleSessionName=task_id,\n )\n credentials = assumed_role_object[\"Credentials\"]\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"],\n )\n return s3_client\n\n def _generate_dataset_from_s3(\n self, files: List[S3FileMeta]\n ) -> Iterable[Dict]:\n if len(files) == 0:\n logger.warning(\"Nothing to download\")\n else:\n logger.info(\"Connecting to aws s3...\")\n task_id: str = str(uuid.uuid4())\n try:\n s3_client = self._get_client(task_id)\n logger.info(\"Start downloading data from S3...\")\n bad_items_count = 0\n for val in files:\n image = None\n try:\n image: Image = read_from_s3(\n s3_client, val.bucket, val.file\n )\n except Exception as e:\n logger.exception(\n f\"Unable to download an item: {val.bucket}/{val.file} Exception: {str(e)}\"\n )\n\n if image is None:\n logger.error(\n f\"Unable to download {val.file} from {val.bucket}\"\n )\n bad_items_count += 1\n continue\n yield {\"item\": image, \"item_id\": val.id}\n\n if bad_items_count == len(files):\n raise FailedToLoadAnythingFromAWSS3()\n\n except Exception as err:\n 
logger.error(f\"Failed to load dataset from s3: {err}\")\n raise err\n\n def load(self, items_data: List[S3FileMeta]) -> Dataset:\n return Dataset.from_generator(\n lambda: self._generate_dataset_from_s3(items_data)\n )" }, { "identifier": "CLIPItemStorageProducer", "path": "embedding_studio/embeddings/data/storages/producers/clip.py", "snippet": "class CLIPItemStorageProducer(ItemStorageProducer):\n def __init__(\n self,\n field_normalizer: DatasetFieldsNormalizer,\n id_field_name: Optional[str] = None,\n ):\n super(CLIPItemStorageProducer, self).__init__(\n ImageItemsDatasetDictPreprocessor(field_normalizer, 224),\n id_field_name,\n )" }, { "identifier": "DatasetFieldsNormalizer", "path": "embedding_studio/embeddings/data/utils/fields_normalizer.py", "snippet": "class DatasetFieldsNormalizer:\n ID_FIELD_NAME = \"item_id\"\n ITEM_FIELD_NAME = \"item\"\n\n def __init__(self, item_field_name: str, id_field_name: str):\n \"\"\"Unify column names in DatasetDict, so it can be used in fine-tuning script.\n A dataset should have ID column, related to ID in clickstream.\n\n :param item_field_name: name of column with items.\n :param id_field_name: name of ID column\n \"\"\"\n if not id_field_name:\n raise ValueError(\"id_field_name should be non-empty string\")\n self.id_field_name = id_field_name\n\n if not item_field_name:\n raise ValueError(\"item_field_name should be non-empty string\")\n self.item_field_name = item_field_name\n\n def __call__(self, dataset: DatasetDict) -> DatasetDict:\n id_normalizer = (\n lambda id_value: str(id_value.item())\n if (\n isinstance(id_value, Tensor)\n or isinstance(id_value, FloatTensor)\n )\n else str(id_value)\n )\n for key in dataset.keys():\n if (\n DatasetFieldsNormalizer.ID_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.id_field_name, DatasetFieldsNormalizer.ID_FIELD_NAME\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ID_FIELD_NAME} field\"\n )\n\n if (\n DatasetFieldsNormalizer.ITEM_FIELD_NAME\n not in dataset.column_names[key]\n ):\n dataset = dataset.rename_column(\n self.item_field_name,\n DatasetFieldsNormalizer.ITEM_FIELD_NAME,\n )\n else:\n logger.warning(\n f\"Dataset {key} split already has {DatasetFieldsNormalizer.ITEM_FIELD_NAME} field\"\n )\n\n return dataset.map(\n lambda example: {\n DatasetFieldsNormalizer.ID_FIELD_NAME: id_normalizer(\n example[DatasetFieldsNormalizer.ID_FIELD_NAME]\n )\n }\n )" }, { "identifier": "CosineProbMarginRankingLoss", "path": "embedding_studio/embeddings/losses/prob_cosine_margin_ranking_loss.py", "snippet": "class CosineProbMarginRankingLoss(ProbMarginRankingLoss):\n def __init__(self, base_margin: Optional[float] = 1.0):\n \"\"\"Embeddings Fine-tuning Loss (modification of MarginRankingLoss)\n Use sigmoid instead of ReLU + results confidences to ignore noises and mistakes.\n Adapt to cosine similarity / distance\n\n :param base_margin: margin ranking loss margin (default: 1.0)\n \"\"\"\n super(CosineProbMarginRankingLoss, self).__init__(\n base_margin=base_margin\n )\n\n def __adjust(self, adjusted_diff: FloatTensor) -> FloatTensor:\n # The way any wrong difference more than 0.01 is worth to be penaltized\n # Sigmoid with this kind of input return prob > 0.1, for difference between\n # pos and more than 0.001. 
That's our expected behaviour.\n # TODO: implement calculation of magic numbers\n return -400 * adjusted_diff + 6" }, { "identifier": "TextToImageCLIPModel", "path": "embedding_studio/embeddings/models/text_to_image/clip.py", "snippet": "class TextToImageCLIPModel(EmbeddingsModelInterface):\n def __init__(self, clip_model: SentenceTransformer):\n \"\"\"Wrapper to SentenceTransformer CLIP model.\n Usage: model = TextToImageCLIPModel(SentenceTransformer('clip-ViT-B-32'))\n\n :param clip_model: clip model from SentenceTransformer package\n \"\"\"\n super(TextToImageCLIPModel, self).__init__(same_query_and_items=False)\n self.clip_model = clip_model\n self.text_model = torch.nn.Sequential(\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"text_model\"],\n PassPoolerOutputLayer(),\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"text_projection\"],\n )\n\n self.vision_model = torch.nn.Sequential(\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"vision_model\"],\n PassPoolerOutputLayer(),\n self.clip_model._modules[\"0\"]\n ._modules[\"model\"]\n ._modules[\"visual_projection\"],\n )\n\n def get_query_model_params(self) -> Iterator[Parameter]:\n return self.text_model.parameters()\n\n def get_items_model_params(self) -> Iterator[Parameter]:\n return self.vision_model.parameters()\n\n def fix_query_model(self, num_fixed_layers: int):\n if (\n len(self.text_model._modules[\"0\"].encoder.layers)\n <= num_fixed_layers\n ):\n raise ValueError(\n f\"Number of fixed layers ({num_fixed_layers}) >= number \"\n f'of existing layers ({len(self.text_model._modules[\"0\"].encoder.layers)})'\n )\n\n self.text_model._modules[\"0\"].embeddings.requires_grad = False\n for i, attn in enumerate(self.text_model._modules[\"0\"].encoder.layers):\n if i < num_fixed_layers:\n self.text_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = False\n\n def unfix_query_model(self):\n self.text_model._modules[\"0\"].embeddings.requires_grad = True\n for i, attn in enumerate(self.text_model._modules[\"0\"].encoder.layers):\n self.text_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = True\n\n def fix_item_model(self, num_fixed_layers: int):\n if (\n len(self.vision_model._modules[\"0\"].encoder.layers)\n <= num_fixed_layers\n ):\n raise ValueError(\n f\"Number of fixed layers ({num_fixed_layers}) >= number \"\n f'of existing layers ({len(self.vision_model._modules[\"0\"].encoder.layers)})'\n )\n\n self.vision_model._modules[\"0\"].embeddings.requires_grad = False\n for i, attn in enumerate(\n self.vision_model._modules[\"0\"].encoder.layers\n ):\n if i < num_fixed_layers:\n self.vision_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = False\n\n def unfix_item_model(self):\n self.vision_model._modules[\"0\"].embeddings.requires_grad = True\n for i, attn in enumerate(\n self.vision_model._modules[\"0\"].encoder.layers\n ):\n self.vision_model._modules[\"0\"].encoder.layers[\n i\n ].requires_grad = True\n\n def tokenize(self, query: str) -> List[Dict]:\n return self.clip_model.tokenize([query])\n\n def forward_query(self, query: str) -> FloatTensor:\n if len(query) == 0:\n logger.warning(\"Provided query is empty\")\n\n tokenized = self.tokenize(query)\n return self.text_model.forward(tokenized[\"input_ids\"].to(self.device))\n\n def forward_items(self, items: List[np.array]) -> FloatTensor:\n if len(items) == 0:\n raise ValueError(\"items list must not be empty\")\n\n return 
self.vision_model.forward(torch.stack(items).to(self.device))" }, { "identifier": "SessionWithEvents", "path": "embedding_studio/models/clickstream/sessions.py", "snippet": "class SessionWithEvents(RegisteredSession):\n events: List[SessionEvent]" }, { "identifier": "FineTuningBuilder", "path": "embedding_studio/models/plugin.py", "snippet": "class FineTuningBuilder:\n data_loader: DataLoader\n query_retriever: QueryRetriever\n clickstream_parser: ClickstreamParser\n clickstream_sessions_splitter: ClickstreamSessionsSplitter\n dataset_fields_normalizer: DatasetFieldsNormalizer\n item_storage_producer: ItemStorageProducer\n accumulators: List[MetricsAccumulator]\n experiments_manager: ExperimentsManager\n fine_tuning_settings: FineTuningSettings\n initial_params: Dict[str, List[Any]]\n ranking_data: RankingData\n initial_max_evals: int = 100" }, { "identifier": "PluginMeta", "path": "embedding_studio/models/plugin.py", "snippet": "class PluginMeta(BaseModel):\n name: str\n version: str = \"1.0.0\"\n description: Optional[str] = None" }, { "identifier": "prepare_data", "path": "embedding_studio/workers/fine_tuning/data/prepare_data.py", "snippet": "def prepare_data(\n clickstream_sessions: List[Union[Dict, SessionWithEvents]],\n parser: ClickstreamParser,\n clickstream_splitter: ClickstreamSessionsSplitter,\n query_retriever: QueryRetriever,\n loader: DataLoader,\n storage_producer: ItemStorageProducer,\n) -> RankingData:\n \"\"\"Prepare fine-tuning data.\n\n :param clickstream_sessions: clickstream sessions\n :param parser: how to parse a clickstream session\n :param clickstream_splitter: how to split clickstream sessions\n :param query_retriever: retrieve query item\n :param loader: load items data\n :param storage_producer: get train/test datasets\n :return: train / test clickstream sessiobs and dataset dict\n \"\"\"\n if len(clickstream_sessions) == 0:\n raise ValueError(\"Empty clickstream sessions list\")\n\n logger.info(\"Parse clickstream sessions data\")\n raw_clickstream_sessions: List[RawClickstreamSession] = [\n (\n parser.parse(session)\n if isinstance(session, dict)\n else parser.parse_from_mongo(session)\n )\n for session in clickstream_sessions\n ]\n\n clickstream_sessions: List[ClickstreamSession] = [\n r.get_session() for r in raw_clickstream_sessions\n ]\n\n logger.info(\"Setup query retriever\")\n query_retriever.setup(clickstream_sessions)\n\n logger.info(\"Split clickstream sessions into train / test\")\n clickstream_dataset = clickstream_splitter.split(clickstream_sessions)\n logger.info(\n f'Splitting is finished, train: {len(clickstream_dataset[\"train\"])} / test: {len(clickstream_dataset[\"test\"])}'\n )\n\n logger.info(\"Get list of files to be loaded\")\n files_to_load: Set[ItemMeta] = set()\n for session in raw_clickstream_sessions:\n files_to_load.update(set([r.item for r in session.results]))\n\n if len(files_to_load) == 0:\n raise ValueError(\"Empty clickstream sessions\")\n\n logger.info(\"Download files and prepare DataDict of ItemStorage values\")\n files_to_load: List[ItemMeta] = list(files_to_load)\n\n dataset: DatasetDict = storage_producer(\n loader.load(files_to_load), clickstream_dataset\n )\n\n return RankingData(clickstream_dataset, dataset)" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int 
= 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n 
wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n 
logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return 
self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n 
else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iteration\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start a run for the initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with 
mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: 
pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # keep only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (default: cosine similarity)\n :param is_similarity: is the ranking function similarity-like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "INITIAL_PARAMS", "path": "embedding_studio/workers/fine_tuning/experiments/initial_params/clip.py", "snippet": "INITIAL_PARAMS: Dict[str, List[Union[int, float]]] = {\n \"num_fixed_layers\": [5, 6, 7, 8],\n \"query_lr\": [1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7],\n \"items_lr\": [1e-4, 5e-5, 1e-5, 5e-6, 1e-6, 5e-7],\n \"query_weight_decay\": [0.0, 1e-6, 1e-5, 1e-4],\n \"items_weight_decay\": [0.0, 1e-6, 1e-5, 1e-4],\n \"margin\": [0.01, 0.025, 0.05],\n}" }, { "identifier": "MetricsAccumulator", "path": "embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py", "snippet": "class MetricsAccumulator:\n def __init__(\n self,\n name: str,\n calc_mean: bool = 
False,\n calc_sliding: bool = False,\n calc_min: bool = False,\n calc_max: bool = False,\n window_size: int = 10,\n ):\n \"\"\"Accumulator of metric values + calculator of aggregations like mean, max, min, sliding_mean.\n\n :param name: metric name (metrics with other name will be ignored)\n :param calc_mean: should accumulator calculate mean value (default: False)\n :param calc_sliding: should accumulator calculate sliding mean value (default: False)\n :param calc_min: should accumulator calculate min value (default: False)\n :param calc_max: should accumulator calculate max value (default: False)\n :param window_size: size of sliding window (default: 10)\n \"\"\"\n if not isinstance(name, str) or len(name) == 0:\n raise ValueError(\"MetricsAccumulator's name should not be empty\")\n\n self._name = name\n\n if not isinstance(calc_mean, bool):\n raise ValueError(\"calc_mean value should be bool\")\n self._calc_mean = calc_mean\n\n if not isinstance(calc_sliding, bool):\n raise ValueError(\"calc_sliding value should be bool\")\n self._calc_sliding = calc_sliding\n\n if not isinstance(calc_min, bool):\n raise ValueError(\"calc_min value should be bool\")\n self._calc_min = calc_min\n\n if not isinstance(calc_max, bool):\n raise ValueError(\"calc_max value should be bool\")\n self._calc_max = calc_max\n\n if not isinstance(window_size, int) or window_size <= 1:\n raise ValueError(\n \"window_size value should be integer with value more than 1\"\n )\n\n self._window_size = window_size\n self._values = []\n\n @property\n def name(self) -> str:\n return self._name\n\n def clear(self):\n \"\"\"Clear accumulator\"\"\"\n self._values = []\n\n def accumulate(self, value: MetricValue) -> List[Tuple[str, float]]:\n \"\"\"Add metric value to an accumulator.\n\n :param value: metric to be accumulated\n :return: aggregations\n \"\"\"\n if self.name == value.name:\n self._values.append(value.value)\n\n return self.aggregate()\n\n return []\n\n def aggregate(self) -> List[Tuple[str, float]]:\n \"\"\"Aggregate accumulated metrics\n\n :return: metric aggregations (last, mean, sliding, min, max)\n \"\"\"\n aggregations: List[Tuple[str, float]] = []\n if len(self._values) > 0:\n aggregations.append((self.name, self._values[-1]))\n if self._calc_mean:\n aggregations.append(\n (f\"mean_{self.name}\", float(np.mean(self._values)))\n )\n\n if self._calc_sliding:\n slide_value = float(\n np.mean(self._values)\n if len(self._values) < self._window_size\n else np.mean(self._values[-self._window_size :])\n )\n aggregations.append((f\"sliding_{self.name}\", slide_value))\n\n if self._calc_min:\n aggregations.append((f\"min_{self.name}\", np.min(self._values)))\n\n if self._calc_max:\n aggregations.append((f\"max_{self.name}\", np.max(self._values)))\n\n return aggregations" } ]
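Taken together, the ExperimentsManager and MetricsAccumulator snippets above specify a full tracking lifecycle: set_iteration opens an MLflow experiment, set_run opens a run for one group of hyperparameters, accumulators aggregate logged metrics, and save_model uploads a model only if it beats the current best. The following is a minimal sketch of that loop, not code from this record; the tracking URI, metric names, and the iteration, parameter, and training objects are placeholder assumptions.

from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
    ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
)

# Placeholder values, not taken from this record:
TRACKING_URI = "http://localhost:5000"

manager = ExperimentsManager(
    tracking_uri=TRACKING_URI,
    main_metric="test_loss",
    accumulators=[
        MetricsAccumulator("train_loss", calc_mean=True, calc_sliding=True)
    ],
    is_loss=True,  # lower is better, so save_model keeps the run with minimal loss
)

manager.set_iteration(iteration)  # a FineTuningIteration, assumed built elsewhere
for params in manager.get_top_params() or initial_param_groups:
    if manager.set_run(params):   # True means this run already finished earlier
        continue
    model = train_one_run(params)  # placeholder training routine
    # during training, manager.save_metric(...) would be fed MetricValue objects
    manager.save_model(model, best_only=True)
    manager.finish_run()
manager.finish_iteration()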
from typing import List from sentence_transformers import SentenceTransformer from embedding_studio.core.config import settings from embedding_studio.core.plugin import FineTuningMethod from embedding_studio.embeddings.data.clickstream.parsers.s3_parser import ( AWSS3ClickstreamParser, ) from embedding_studio.embeddings.data.clickstream.search_event import ( DummyEventType, SearchResult, ) from embedding_studio.embeddings.data.clickstream.splitter import ( ClickstreamSessionsSplitter, ) from embedding_studio.embeddings.data.clickstream.text_query_item import ( TextQueryItem, ) from embedding_studio.embeddings.data.clickstream.text_query_retriever import ( TextQueryRetriever, ) from embedding_studio.embeddings.data.loaders.s3.s3_loader import ( AWSS3DataLoader, ) from embedding_studio.embeddings.data.storages.producers.clip import ( CLIPItemStorageProducer, ) from embedding_studio.embeddings.data.utils.fields_normalizer import ( DatasetFieldsNormalizer, ) from embedding_studio.embeddings.losses.prob_cosine_margin_ranking_loss import ( CosineProbMarginRankingLoss, ) from embedding_studio.embeddings.models.text_to_image.clip import ( TextToImageCLIPModel, ) from embedding_studio.models.clickstream.sessions import SessionWithEvents from embedding_studio.models.plugin import FineTuningBuilder, PluginMeta from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import ( ExperimentsManager, ) from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import ( FineTuningSettings, ) from embedding_studio.workers.fine_tuning.experiments.initial_params.clip import ( INITIAL_PARAMS, ) from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import ( MetricsAccumulator, )
16,533
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser(
class DefaultFineTuningMethod(FineTuningMethod): meta = PluginMeta( name="Default Fine Tuning Method", version="0.0.1", description="A default fine-tuning plugin", ) def __init__(self): # uncomment and pass your credentials to use your own s3 bucket # creds = { # "role_arn": "arn:aws:iam::123456789012:role/some_data" # "aws_access_key_id": "TESTACCESSKEIDTEST11", # "aws_secret_access_key": "QWERTY1232qdsadfasfg5349BBdf30ekp23odk03", # } # self.data_loader = AWSS3DataLoader(**creds) # with empty creds, use anonymous session creds = { } self.data_loader = AWSS3DataLoader(**creds) self.retriever = TextQueryRetriever() self.parser = AWSS3ClickstreamParser(
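For orientation, the FineTuningBuilder dataclass in this record's context is the bundle these components are ultimately wired into. The assembly below is purely illustrative and is not the record's actual continuation (the gold continuation follows); every lowercase variable is a placeholder object assumed constructed elsewhere, and only the field names and INITIAL_PARAMS come from the snippets above.

# Illustrative only: wiring pre-built components into the FineTuningBuilder
# dataclass shown in this record's context. Placeholder variables throughout.
builder = FineTuningBuilder(
    data_loader=data_loader,                # e.g. AWSS3DataLoader
    query_retriever=query_retriever,        # e.g. TextQueryRetriever
    clickstream_parser=clickstream_parser,  # e.g. AWSS3ClickstreamParser
    clickstream_sessions_splitter=splitter,
    dataset_fields_normalizer=normalizer,
    item_storage_producer=storage_producer,  # e.g. CLIPItemStorageProducer
    accumulators=[MetricsAccumulator("train_loss", calc_mean=True)],
    experiments_manager=experiments_manager,
    fine_tuning_settings=FineTuningSettings(loss_func=loss_func),
    initial_params=INITIAL_PARAMS,
    ranking_data=ranking_data,               # output of prepare_data
    initial_max_evals=100,
)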
TextQueryItem, SearchResult, DummyEventType
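This short line is the gold continuation of the truncated source above: it completes the AWSS3ClickstreamParser( call that the cropped code ends on. Assuming the records are serialized as JSON lines with field names along the lines of "cropped_code" and "next_line" (the names are an assumption about the schema, inferred from the values shown here), a consumer could form a completion example like this:

import json

# Hypothetical reader for records of this shape; the field names are assumed.
def to_completion_example(record: dict) -> tuple:
    prompt = record["cropped_code"]  # source truncated mid-call
    target = record["next_line"]     # gold next line, e.g. the call arguments above
    return prompt, target

with open("samples.jsonl") as f:     # placeholder path
    for raw in f:
        prompt, target = to_completion_example(json.loads(raw))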
4
2023-10-31 00:33:13+00:00
24k
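The prepare_data snippet in this record documents the whole data-preparation pipeline: parse sessions, split train/test, set up the query retriever, download item files, and produce the storage datasets. A hedged sketch of invoking it with the plugin's components follows; `sessions`, `splitter`, and `storage_producer` are assumed constructed elsewhere, and `plugin` stands for an instance of the DefaultFineTuningMethod shown above.

from embedding_studio.workers.fine_tuning.data.prepare_data import prepare_data

ranking_data = prepare_data(
    clickstream_sessions=sessions,      # List[SessionWithEvents] from clickstream storage
    parser=plugin.parser,               # AWSS3ClickstreamParser
    clickstream_splitter=splitter,      # ClickstreamSessionsSplitter
    query_retriever=plugin.retriever,   # TextQueryRetriever
    loader=plugin.data_loader,          # AWSS3DataLoader (anonymous session here)
    storage_producer=storage_producer,  # CLIPItemStorageProducer
)
# Per the snippet, RankingData is built as RankingData(clickstream_dataset, dataset):
# a train/test split of sessions plus a DatasetDict of item storages.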
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n\n if isinstance(agent_idxs, str):\n if \"*\" in agent_idxs:\n self.agent_idxs = np.arange(pop.n_agents)\n else:\n self.agent_idxs = \\\n np.array([int(x) for x in agent_idxs.split(',')])\n else:\n self.agent_idxs = agent_idxs # assume array\n\n assert np.max(self.agent_idxs) < pop.n_agents, \\\n 'Agent index is out of bounds.'\n\n if isinstance(env_names, str):\n env_names = [\n x.strip() for x in env_names.split(',')\n ]\n\n self.n_episodes = n_episodes\n env_infos = create_envs_for_kwargs(env_names, env_kwargs)\n env_names = []\n self.ext_env_names = []\n env_kwargs = []\n for (name, ext_name, kwargs) in env_infos:\n env_names.append(name)\n self.ext_env_names.append(ext_name)\n env_kwargs.append(kwargs)\n self.n_envs = len(env_names)\n\n self.benvs = []\n self.env_params = []\n self.env_has_solved_rate = []\n for env_name, kwargs in zip(env_names, env_kwargs):\n benv = envs.BatchEnv(\n env_name=env_name,\n n_parallel=n_episodes,\n n_eval=1,\n env_kwargs=kwargs,\n wrappers=['monitor_return', 'monitor_ep_metrics']\n )\n self.benvs.append(benv)\n self.env_params.append(benv.env.params)\n self.env_has_solved_rate.append(benv.env.eval_solved_rate is not None)\n\n self.action_dtype = self.benvs[0].env.action_space().dtype\n\n monitored_metrics = self.benvs[0].env.get_monitored_metrics()\n self.rolling_stats = RollingStats(names=monitored_metrics, window=1)\n self._update_ep_stats = jax.vmap(\n jax.vmap(\n self.rolling_stats.update_stats, in_axes=(0,0,0,None)),\n in_axes=(0,0,0,None))\n\n self.test_return_pre = 'test_return'\n self.test_solved_rate_pre = 'test_solved_rate'\n\n self.render_mode = render_mode\n if render_mode:\n from minimax.envs.viz.grid_viz import GridVisualizer\n self.viz = GridVisualizer()\n self.viz.show()\n\n if render_mode == 'ipython':\n from IPython import display\n self.ipython_display = display\n\n def load_checkpoint_state(self, runner_state, state):\n runner_state = list(runner_state)\n runner_state[1] = runner_state[1].load_state_dict(state[1])\n\n return tuple(runner_state)\n\n @partial(jax.jit, static_argnums=(0,2))\n def _get_transition(\n self,\n rng,\n benv,\n params,\n state,\n obs,\n carry,\n zero_carry,\n extra):\n value, pi_params, next_carry = self.pop.act(params, obs, carry)\n pi = self.pop.get_action_dist(pi_params, dtype=self.action_dtype)\n rng, subrng = jax.random.split(rng)\n action = pi.sample(seed=subrng)\n log_pi = pi.log_prob(action)\n\n rng, *vrngs = jax.random.split(rng, self.pop.n_agents+1)\n\n step_args = (jnp.array(vrngs), state, action, extra)\n (next_obs, \n next_state, \n reward, \n done, \n info, \n extra) = benv.step(*step_args)\n\n # Add transition to storage\n step = (obs, action, reward, done, log_pi, value)\n if carry is not None:\n step += (carry,)\n\n # Zero carry if needed\n if carry is not None:\n next_carry = jax.vmap(_tree_util.pytree_select)(\n done, zero_carry, next_carry)\n\n if self.render_mode:\n self.viz.render(\n benv.env.params, \n jax.tree_util.tree_map(lambda x: x[0][0], state))\n if self.render_mode == 'ipython':\n self.ipython_display.display(self.viz.window.fig)\n self.ipython_display.clear_output(wait=True)\n\n return next_state, next_obs, next_carry, done, info, extra\n\n @partial(jax.jit, static_argnums=(0, 2))\n def _rollout_benv(\n self, \n rng, \n benv,\n 
params,\n env_params,\n state,\n obs,\n carry,\n zero_carry,\n extra,\n ep_stats):\n\n def _scan_rollout(scan_carry, rng):\n (state, \n obs, \n carry,\n extra, \n ep_stats) = scan_carry\n \n step = \\\n self._get_transition(\n rng,\n benv,\n params, \n state, \n obs, \n carry, \n zero_carry,\n extra)\n\n (next_state, \n next_obs, \n next_carry, \n done, \n info, \n extra) = step\n\n ep_stats = self._update_ep_stats(ep_stats, done, info, 1)\n\n return (next_state, next_obs, next_carry, extra, ep_stats), None\n\n n_steps = benv.env.max_episode_steps()\n rngs = jax.random.split(rng, n_steps)\n (state, \n obs, \n carry, \n extra,\n ep_stats),_ = jax.lax.scan(\n _scan_rollout,\n (state, obs, carry, extra, ep_stats),\n rngs,\n length=n_steps)\n\n return ep_stats\n\n @partial(jax.jit, static_argnums=(0,))\n def run(self, rng, params):\n \"\"\"\n Rollout agents on each env. \n\n For each env, run n_eval episodes in parallel, \n where each is indexed to return in order.\n \"\"\"\n eval_stats = self.fake_run(rng, params)\n rng, *rollout_rngs = jax.random.split(rng, self.n_envs+1)\n for i, (benv, env_param) in enumerate(zip(self.benvs, self.env_params)):\n rng, *reset_rngs = jax.random.split(rng, self.pop.n_agents+1)\n obs, state, extra = benv.reset(jnp.array(reset_rngs))\n\n if self.pop.agent.is_recurrent:\n rng, subrng = jax.random.split(rng)\n zero_carry = self.pop.init_carry(subrng, obs)\n else:\n zero_carry = None\n\n # Reset episodic stats\n ep_stats = self.rolling_stats.reset_stats(\n batch_shape=(self.pop.n_agents, self.n_episodes))\n\n ep_stats = self._rollout_benv(\n rollout_rngs[i],\n benv,\n jax.lax.stop_gradient(params), \n env_param, \n state, \n obs,\n zero_carry,\n zero_carry,\n extra,\n ep_stats)\n \n env_name = self.ext_env_names[i]\n mean_return = ep_stats['return'].mean(1)\n\n if self.env_has_solved_rate[i]:\n mean_solved_rate = jax.vmap(jax.vmap(benv.env.eval_solved_rate))(ep_stats).mean(1)\n\n for idx in self.agent_idxs:\n eval_stats[f'eval/a{idx}:{self.test_return_pre}:{env_name}'] = mean_return[idx].squeeze()\n if self.env_has_solved_rate[i]:\n eval_stats[f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}'] = mean_solved_rate[idx].squeeze()\n\n return eval_stats\n\n def fake_run(self, rng, params):\n eval_stats = {}\n for i, env_name in enumerate(self.ext_env_names):\n for idx in self.agent_idxs:\n eval_stats.update({\n f'eval/a{idx}:{self.test_return_pre}:{env_name}':0.\n })\n if self.env_has_solved_rate[i]:\n eval_stats.update({\n f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}':0.,\n })\n\n return eval_stats" }, { "identifier": "DRRunner", "path": "src/minimax/runners/dr_runner.py", "snippet": "class DRRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students. 
\n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchEnv: Manages environment step and reset logic, using a \n\t\tpopulation of agents.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tstudent_agents,\n\t\tn_students=1,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=256,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tnormalize_return=False,\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\tn_devices=1,\n\t\trender=False):\n\n\t\tassert len(student_agents) == 1, 'Only one type of student supported.'\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.normalize_return = normalize_return\n\t\tself.track_env_metrics = track_env_metrics\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tself.env, self.env_params = envs.make(\n\t\t\tenv_name, \n\t\t\tenv_kwargs=env_kwargs\n\t\t)\n\t\tself._action_shape = self.env.action_space().shape\n\n\t\tself.benv = envs.BatchEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics']\n\t\t)\n\t\tself.action_dtype = self.benv.env.action_space().dtype\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.env.action_space(),\n\t\t\tobs_space=self.env.observation_space(),\n\t\t\tagent=self.student_pop.agent,\n\t\t)\n\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trngs, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), n_parallel=n_parallel)\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.student_pop.init_carry(subrng, obs)\n\t\t\tself.zero_carry = jax.tree_map(lambda x: x.at[:,:self.n_parallel].get(), carry)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = self.student_pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(self.lr),\n\t\t\tend_value=-float(self.lr_final),\n\t\t\ttransition_steps=self.lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.adam(learning_rate=float(self.lr), eps=self.adam_eps)\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=self.student_pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, n_parallel*self.n_eval))\n\n\t\tstart_state = state\n\n\t\treturn (\n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate,\n\t\t\tstart_state, # Used to track metrics from starting state\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2))\n\tdef _get_transition(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tparams, \n\t\trollout, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\n\t\tpi = pop.get_action_dist(pi_params, dtype=self.action_dtype)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\t(next_obs, \n\t\t next_state, \n\t\t reward, \n\t\t done, \n\t\t info, \n\t\t extra) = self.benv.step(jnp.array(vrngs), state, action, extra)\n\n\t\tnext_start_state = jax.vmap(_tree_util.pytree_select)(\n\t\t\tdone, next_state, start_state\n\t\t)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = self.student_rollout.append(rollout, *step)\n\n\t\tif self.render:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn (\n\t\t\trollout, \n\t\t\tnext_state,\n\t\t\tnext_start_state, \n\t\t\tnext_obs, \n\t\t\tnext_carry, \n\t\t\tdone, \n\t\t\tinfo, \n\t\t\textra\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _rollout_students(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None, \n\t\tep_stats=None):\n\t\trollout = self.student_rollout.reset()\n\n\t\trngs = jax.random.split(rng, self.n_rollout_steps)\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\trollout, state, start_state, obs, carry, done, extra, ep_stats, train_state = scan_carry \n\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng, \n\t\t\t\t\tself.student_pop, \n\t\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\t\trollout, \n\t\t\t\t\tstate,\n\t\t\t\t\tstart_state, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry,\n\t\t\t\t\tdone, \n\t\t\t\t\textra)\n\t\t\t(rollout, \n\t\t\t next_state,\n\t\t\t next_start_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tep_stats = self._update_ep_stats(ep_stats, done, info)\n\n\t\t\treturn (\n\t\t\t\trollout, \n\t\t\t\tnext_state,\n\t\t\t\tnext_start_state,\n\t\t\t\tnext_obs, \n\t\t\t\tnext_carry,\n\t\t\t\tdone,\n\t\t\t\textra, 
\n\t\t\t\tep_stats,\n\t\t\t\ttrain_state), None\n\n\t\t(rollout, \n\t\t state, \n\t\t start_state, \n\t\t obs, \n\t\t carry, \n\t\t done,\n\t\t extra, \n\t\t ep_stats,\n\t\t train_state), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t start_state,\n\t\t\t obs, \n\t\t\t carry, \n\t\t\t done,\n\t\t\t extra, \n\t\t\t ep_stats,\n\t\t\t train_state),\n\t\t\trngs,\n\t\t\tlength=self.n_rollout_steps,\n\t\t)\n\n\t\treturn rollout, state, start_state, obs, carry, extra, ep_stats, train_state\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None):\n\t\tstats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tstats.update(update_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], stats) # for agent0\n\t\t\t\t_stats.update({f'a{i}/{k}':v for k,v in _student_stats.items()})\n\t\t\tstats = _stats\n\n\t\tif self.track_env_metrics:\n\t\t\tmean_env_metrics = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(env_metrics)\n\t\t\tmean_env_metrics = {f'env/{k}':v for k,v in mean_env_metrics.items()}\n\n\t\t\tif self.n_students > 1:\n\t\t\t\t_env_metrics = {}\n\t\t\t\tfor i in range(self.n_students):\n\t\t\t\t\t_student_env_metrics = jax.tree_util.tree_map(lambda x: x[i], mean_env_metrics) # for agent0\n\t\t\t\t\t_env_metrics.update({f'{k}_a{i}':v for k,v in _student_env_metrics.items()})\n\t\t\t\tmean_env_metrics = _env_metrics\n\n\t\t\tstats.update(mean_env_metrics)\n\n\t\tif self.n_students == 1:\n\t\t\tstats = jax.tree_map(lambda x: x[0], stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\t\"\"\"\n\t\tPerform one update step: rollout all students and teachers + update with PPO\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\t# PPOAgent vmaps over 
the train state and batch. Batch must be N x EM\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# Collect env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(rollout_start_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)" }, { "identifier": "PAIREDRunner", "path": "src/minimax/runners/paired_runner.py", "snippet": "class PAIREDRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students and teachers. \n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchUEDEnv: Manages environment step and reset logic for a \n\t\tpopulation of agents batched over a pair of student and \n\t\tteacher MDPs.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tued_env_kwargs,\n\t\tstudent_agents,\n\t\tn_students=2,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=250,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tteacher_lr=None,\n\t\tteacher_lr_final=None,\n\t\tteacher_lr_anneal_steps=None,\n\t\tteacher_discount=0.99,\n\t\tteacher_gae_lambda=0.95,\n\t\tteacher_agents=None,\n\t\tued_score='relative_regret',\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\trender=False,\n\t\tn_devices=1):\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tued_score = UEDScore[ued_score.upper()]\n\n\t\tassert len(student_agents) == 1, \\\n\t\t\t'Only one type of student supported.'\n\t\tassert not (n_students > 2 and ued_score in [UEDScore.RELATIVE_REGRET, UEDScore.MEAN_RELATIVE_REGRET]), \\\n\t\t\t'Standard PAIRED uses only 2 students.'\n\t\tassert teacher_agents is None or len(teacher_agents) == 1, \\\n\t\t\t'Only one type of teacher supported.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.teacher_lr = \\\n\t\t\tlr if teacher_lr is None else teacher_lr\n\t\tself.teacher_lr_final = \\\n\t\t\tself.lr_final if teacher_lr_final is None else teacher_lr_final\n\t\tself.teacher_lr_anneal_steps = \\\n\t\t\tlr_anneal_steps if teacher_lr_anneal_steps is None else teacher_lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.ued_score = ued_score\n\t\tself.track_env_metrics = track_env_metrics\n\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tif teacher_agents is not None:\n\t\t\tself.teacher_pop = AgentPop(teacher_agents[0], n_agents=1)\n\n\t\t# This 
ensures correct partial-episodic bootstrapping by avoiding\n\t\t# any termination purely due to timeouts.\n\t\t# env_kwargs.max_episode_steps = self.n_rollout_steps + 1\n\t\tself.benv = envs.BatchUEDEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\tued_env_kwargs=ued_env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics'],\n\t\t\tued_wrappers=[]\n\t\t)\n\t\tself.teacher_n_rollout_steps = \\\n\t\t\tself.benv.env.ued_max_episode_steps()\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.benv.env.action_space(),\n\t\t\tobs_space=self.benv.env.observation_space(),\n\t\t\tagent=self.student_pop.agent\n\t\t)\n\n\t\tself.teacher_rollout = RolloutStorage(\n\t\t\tdiscount=teacher_discount,\n\t\t\tgae_lambda=teacher_gae_lambda,\n\t\t\tn_steps=self.teacher_n_rollout_steps,\n\t\t\tn_agents=1,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=1,\n\t\t\taction_space=self.benv.env.ued_action_space(),\n\t\t\tobs_space=self.benv.env.ued_observation_space(),\n\t\t\tagent=self.teacher_pop.agent,\n\t\t)\n\n\t\tued_monitored_metrics = ('return',)\n\t\tself.ued_rolling_stats = RollingStats(\t\n\t\t\tnames=ued_monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\t\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\t\tself._update_ued_ep_stats = jax.vmap(jax.vmap(self.ued_rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trng, student_rng, teacher_rng = jax.random.split(rng,3)\n\t\tstudent_info = self._reset_pop(\n\t\t\t\tstudent_rng, \n\t\t\t\tself.student_pop, \n\t\t\t\tpartial(self.benv.reset, sub_batch_size=n_parallel*self.n_eval),\n\t\t\t\tn_parallel_ep=n_parallel*self.n_eval,\n\t\t\t\tlr_init=self.lr,\n\t\t\t\tlr_final=self.lr_final,\n\t\t\t\tlr_anneal_steps=self.lr_anneal_steps)\n\n\t\tteacher_info = self._reset_pop(\n\t\t\tteacher_rng, \n\t\t\tself.teacher_pop, \n\t\t\tpartial(self.benv.reset_teacher, n_parallel=n_parallel),\n\t\t\tn_parallel_ep=n_parallel,\n\t\t\tlr_init=self.teacher_lr,\n\t\t\tlr_final=self.teacher_lr_final,\n\t\t\tlr_anneal_steps=self.teacher_lr_anneal_steps)\n\n\t\treturn (\n\t\t\trng,\n\t\t\t*student_info,\n\t\t\t*teacher_info\n\t\t)\n\n\tdef _reset_pop(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tenv_reset_fn, \n\t\tn_parallel_ep=1,\n\t\tlr_init=3e-4,\n\t\tlr_final=3e-4,\n\t\tlr_anneal_steps=0):\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\t\treset_out = env_reset_fn(jnp.array(vrngs))\n\t\tif len(reset_out) == 2:\n\t\t\tobs, state = reset_out\n\t\telse:\n\t\t\tobs, state, extra = reset_out\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif pop.agent.is_recurrent:\n\t\t\tcarry = pop.init_carry(subrng, obs)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(lr_init),\n\t\t\tend_value=-float(lr_final),\n\t\t\ttransition_steps=lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.scale_by_adam(eps=self.adam_eps),\n\t\t\toptax.scale_by_schedule(schedule_fn),\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\t\t\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(pop.n_agents,n_parallel_ep))\n\n\t\treturn train_state, state, obs, carry, ep_stats\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\t\t_state[6] = state[6].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\t\trunner_state[6] = runner_state[6].load_state_dict(state[6])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2,3))\n\tdef _get_transition(\n\t\tself,\n\t\trng, \n\t\tpop, \n\t\trollout_mgr, \n\t\trollout, \n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\t\tpi = pop.get_action_dist(pi_params)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\n\t\tif pop is self.student_pop:\n\t\t\tstep_fn = self.benv.step_student\n\t\telse:\n\t\t\tstep_fn = self.benv.step_teacher\n\t\tstep_args = (jnp.array(vrngs), state, action)\n\n\t\tif reset_state is not None: # Needed for student to reset to same instance\n\t\t\tstep_args += (reset_state,)\n\n\t\tif extra is not None:\n\t\t\tstep_args += (extra,)\n\t\t\tnext_obs, next_state, reward, done, info, extra = step_fn(*step_args)\n\t\telse:\n\t\t\tnext_obs, next_state, reward, done, info = step_fn(*step_args)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = rollout_mgr.append(rollout, *step)\n\n\t\tif self.render and pop is self.student_pop:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn rollout, next_state, next_obs, next_carry, done, info, extra\n\n\t@partial(jax.jit, static_argnums=(0,2,3,4))\n\tdef _rollout(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\trollout_mgr,\n\t\tn_steps,\n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\trngs = jax.random.split(rng, n_steps)\n\n\t\trollout = rollout_mgr.reset()\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t obs, \n\t\t\t carry,\n\t\t\t done, \n\t\t\t extra, \n\t\t\t ep_stats) = scan_carry\n\t\t\t\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng,\n\t\t\t\t\tpop, \n\t\t\t\t\trollout_mgr,\n\t\t\t\t\trollout,\n\t\t\t\t\tparams, \n\t\t\t\t\tstate, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry, \n\t\t\t\t\tdone,\n\t\t\t\t\treset_state, \n\t\t\t\t\textra)\n\n\t\t\t(rollout, \n\t\t\t next_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tif ep_stats is not None:\n\t\t\t\t_ep_stats_update_fn = self._update_ep_stats \\\n\t\t\t\t\tif pop 
is self.student_pop else self._update_ued_ep_stats\n\n\t\t\t\tep_stats = _ep_stats_update_fn(ep_stats, done, info)\n\n\t\t\treturn (rollout, next_state, next_obs, next_carry, done, extra, ep_stats), None\n\n\t\t(rollout, state, obs, carry, done, extra, ep_stats), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, state, obs, carry, done, extra, ep_stats),\n\t\t\trngs,\n\t\t\tlength=n_steps,\n\t\t\tunroll=self.n_unroll_rollout\n\t\t)\n\n\t\treturn rollout, state, obs, carry, extra, ep_stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, \n\t\tupdate_stats, ep_stats, \n\t\tued_update_stats, ued_ep_stats,\n\t\tenv_metrics=None,\n\t\tgrad_stats=None, ued_grad_stats=None):\n\t\tmean_returns_by_student = jax.vmap(lambda x: x.mean())(ep_stats['return'])\n\t\tmean_returns_by_teacher = jax.vmap(lambda x: x.mean())(ued_ep_stats['return'])\n\n\t\tmean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tued_mean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ued_ep_stats[k] for k in self.ued_rolling_stats.names}\n\t\t)\n\n\t\tstudent_stats = {\n\t\t\tf'mean_{k}':v for k,v in mean_ep_stats.items()\n\t\t}\n\t\tstudent_stats.update(update_stats)\n\n\t\tstats = {}\n\t\tfor i in range(self.n_students):\n\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], student_stats) # for agent0\n\t\t\tstats.update({f'{k}_a{i}':v for k,v in _student_stats.items()})\n\n\t\tteacher_stats = {\n\t\t\tf'mean_{k}_tch':v for k,v in ued_mean_ep_stats.items()\n\t\t}\n\t\tteacher_stats.update({\n\t\t\tf'{k}_tch':v[0] for k,v in ued_update_stats.items()\n\t\t})\n\t\tstats.update(teacher_stats)\n\n\t\tif self.track_env_metrics:\n\t\t\tpassable_mask = env_metrics.pop('passable')\n\t\t\tmean_env_metrics = jax.tree_util.tree_map(\n\t\t\t\tlambda x: (x*passable_mask).sum()/passable_mask.sum(), \n\t\t\t\tenv_metrics\n\t\t\t)\n\t\t\tmean_env_metrics.update({'passable_ratio': passable_mask.mean()})\n\t\t\tstats.update({\n\t\t\t\tf'env/{k}':v for k,v in mean_env_metrics.items()\n\t\t\t})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec[6] = P(None)\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate,\n\t\tobs,\n\t\tcarry,\n\t\tep_stats,\n\t\tued_train_state,\n\t\tued_state,\n\t\tued_obs,\n\t\tued_carry,\n\t\tued_ep_stats):\n\t\t\"\"\"\n\t\tPerform one update step: rollout teacher + students\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\t# === Reset teacher env + rollout teacher\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tued_reset_out = self.benv.reset_teacher(jnp.array(vrngs))\n\t\tif len(ued_reset_out) > 2:\n\t\t\tued_obs, ued_state, ued_extra = ued_reset_out\n\t\telse:\n\t\t\tued_obs, ued_state = ued_reset_out\n\t\t\tued_extra = None\n\n\t\t# Reset UED ep_stats\n\t\tif self.ued_rolling_stats is not None:\n\t\t\tued_ep_stats = 
self.ued_rolling_stats.reset_stats(\n\t\t\t\tbatch_shape=(1,self.n_parallel))\n\t\telse:\n\t\t\tued_ep_stats = None\n\n\t\ttch_rollout_batch_shape = (1,self.n_parallel*self.n_eval)\n\t\tdone = jnp.zeros(tch_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_rollout, ued_state, ued_obs, ued_carry, _, ued_ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng,\n\t\t\t\tself.teacher_pop,\n\t\t\t\tself.teacher_rollout,\n\t\t\t\tself.teacher_n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(ued_train_state.params), \n\t\t\t\tued_state, \n\t\t\t\tued_obs, \n\t\t\t\tued_carry,\n\t\t\t\tdone, \n\t\t\t\textra=ued_extra, \n\t\t\t\tep_stats=ued_ep_stats\n\t\t\t)\n\n\t\t# === Reset student to new envs + rollout students\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tobs, state, extra = jax.tree_util.tree_map(\n\t\t\tlambda x:x.squeeze(0), self.benv.reset_student(\n\t\t\t\tjnp.array(vrngs),\n\t\t\t\tued_state, \n\t\t\t\tself.student_pop.n_agents))\n\t\treset_state = state\n\n\t\t# Reset student ep_stats\n\t\tst_rollout_batch_shape = (self.n_students,self.n_parallel*self.n_eval)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=st_rollout_batch_shape)\n\n\t\tdone = jnp.zeros(st_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, obs, carry, extra, ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng, \n\t\t\t\tself.student_pop,\n\t\t\t\tself.student_rollout,\n\t\t\t\tself.n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(train_state.params),\n\t\t\t\tstate, \n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\treset_state=reset_state, \n\t\t\t\textra=extra, \n\t\t\t\tep_stats=ep_stats)\n\n\t\t# === Update student with PPO\n\t\t# PPOAgent vmaps over the train state and batch. 
Batch must be N x EM\n\t\tstudent_rollout_last_value = self.student_pop.get_value(\n\t\t\tjax.lax.stop_gradient(train_state.params), obs, carry\n\t\t)\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tstudent_rollout_last_value\n\t\t)\n\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# === Update teacher with PPO\n\t\t# - Compute returns per env per agent\n\t\t# - Compute batched returns based on returns per env per agent\n\t\tued_score, _ = compute_ued_scores(self.ued_score, train_batch, self.n_eval)\n\t\tued_rollout = self.teacher_rollout.set_final_reward(ued_rollout, ued_score)\n\t\tued_train_batch = self.teacher_rollout.get_batch(\n\t\t\tued_rollout, \n\t\t\tjnp.zeros((1, self.n_parallel)) # Last step terminates episode\n\t\t)\n\n\t\tued_ep_stats = self._update_ued_ep_stats(\n\t\t\tued_ep_stats, \n\t\t\tjnp.ones((1,len(ued_score),1), dtype=jnp.bool_),\n\t\t\t{'return': jnp.expand_dims(ued_score, (0,-1))}\n\t\t)\n\n\t\t# Update teacher, batch must be 1 x Ex1\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_train_state, ued_update_stats = self.teacher_pop.update(subrng, ued_train_state, ued_train_batch)\n\n\t\t# --------------------------------------------------\n\t\t# Collect metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(reset_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tgrad_stats, ued_grad_stats = None, None\n\n\t\tstats = self._compile_stats(\n\t\t\tupdate_stats, ep_stats, \n\t\t\tued_update_stats, ued_ep_stats,\n\t\t\tenv_metrics,\n\t\t\tgrad_stats, ued_grad_stats)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tued_train_state = ued_train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng,\n\t\t\ttrain_state, state, obs, carry, ep_stats,\n\t\t\tued_train_state, ued_state, ued_obs, ued_carry, ued_ep_stats\n\t\t)" }, { "identifier": "PLRRunner", "path": "src/minimax/runners/plr_runner.py", "snippet": "class PLRRunner(DRRunner):\n\tdef __init__(\n\t\tself, \n\t\t*,\n\t\treplay_prob=0.5,\n\t\tbuffer_size=100,\n\t\tstaleness_coef=0.3,\n\t\tuse_score_ranks=True,\n\t\ttemp=1.0,\n\t\tmin_fill_ratio=0.5,\n\t\tuse_robust_plr=False,\n\t\tuse_parallel_eval=False,\n\t\tued_score='l1_value_loss',\n\t\tforce_unique=False, # Slower if True\n\t\tmutation_fn=None,\n\t\tn_mutations=0,\n\t\tmutation_criterion='batch',\n\t\tmutation_subsample_size=1,\n\t\t**kwargs):\n\t\tuse_mutations = mutation_fn is not None\n\t\tif use_parallel_eval:\n\t\t\treplay_prob = 1.0 # Replay every rollout cycle\n\t\t\tmutation_criterion = 'batch' # Force batch mutations (no UED scores)\n\t\t\tself._n_parallel_batches = 3 if use_mutations else 2\n\t\t\tkwargs['n_parallel'] *= self._n_parallel_batches\n\n\t\tsuper().__init__(**kwargs)\n\n\t\tself.replay_prob = replay_prob\n\t\tself.buffer_size = buffer_size\n\t\tself.staleness_coef = staleness_coef\n\t\tself.temp = temp\n\t\tself.use_score_ranks = use_score_ranks\n\t\tself.min_fill_ratio = min_fill_ratio\n\t\tself.use_robust_plr = use_robust_plr\n\t\tself.use_parallel_eval = use_parallel_eval\n\t\tself.ued_score = UEDScore[ued_score.upper()]\n\n\t\tself.use_mutations = use_mutations\n\t\tif self.use_mutations:\n\t\t\tself.mutation_fn = envs.get_mutator(self.benv.env_name, mutation_fn)\n\t\telse:\n\t\t\tself.mutation_fn = None\n\t\tself.n_mutations = n_mutations\n\t\tself.mutation_criterion = 
MutationCriterion[mutation_criterion.upper()]\n\t\tself.mutation_subsample_size = mutation_subsample_size\n\n\t\tself.force_unique = force_unique\n\t\tif force_unique:\n\t\t\tself.comparator_fn = envs.get_comparator(self.benv.env_name)\n\t\telse:\n\t\t\tself.comparator_fn = None\n\n\t\tif mutation_fn is not None and mutation_criterion != 'batch':\n\t\t\tassert self.n_parallel % self.mutation_subsample_size == 0, \\\n\t\t\t\t'Number of parallel envs must be divisible by mutation subsample size.'\n\n\tdef reset(self, rng):\n\t\trunner_state = list(super().reset(rng))\n\t\trng = runner_state[0]\n\t\trunner_state[0], subrng = jax.random.split(rng)\n\t\texample_state = self.benv.env.reset(rng)[1]\n\n\t\tself.plr_mgr = PopPLRManager(\n\t\t\tn_agents=self.n_students,\n\t\t\texample_level=example_state,\n\t\t\tued_score=self.ued_score,\n\t\t\treplay_prob=self.replay_prob,\n\t\t\tbuffer_size=self.buffer_size,\n\t\t\tstaleness_coef=self.staleness_coef,\n\t\t\ttemp=self.temp,\n\t\t\tuse_score_ranks=self.use_score_ranks,\n\t\t\tmin_fill_ratio=self.min_fill_ratio,\n\t\t\tuse_robust_plr=self.use_robust_plr,\n\t\t\tuse_parallel_eval=self.use_parallel_eval,\n\t\t\tcomparator_fn=self.comparator_fn,\n\t\t\tn_devices=self.n_devices\n\t\t)\n\t\tplr_buffer = self.plr_mgr.reset(self.n_students)\n\n\t\ttrain_state = runner_state[1]\n\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\t\tif self.n_devices == 1:\n\t\t\trunner_state[1] = train_state\n\t\telse:\n\t\t\tplr_buffer = jax.tree_map(lambda x: x.repeat(self.n_devices, 1), plr_buffer) # replicate plr buffer\n\t\t\trunner_state += (plr_buffer,) # Return PLR buffer directly to make shmap easier\n\n\t\tself.dummy_eval_output = self._create_dummy_eval_output(train_state)\n\n\t\treturn tuple(runner_state)\n\n\tdef _create_dummy_eval_output(self, train_state):\n\t\trng, *vrngs = jax.random.split(jax.random.PRNGKey(0), self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, self.n_parallel*self.n_eval))\n\n\t\tued_scores = jnp.zeros((self.n_students, self.n_parallel))\n\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\t\trollout = self.student_rollout.reset()\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstate,\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tstate,\n\t\t\ttrain_batch,\n\t\t\tued_scores\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,8))\n\tdef _eval_and_update_plr(\n\t\t\tself,\n\t\t\trng,\n\t\t\tlevels,\n\t\t\tlevel_idxs, \n\t\t\ttrain_state, \n\t\t\tupdate_plr,\n\t\t\tparent_idxs=None,\n\t\t\tdupe_mask=None,\n\t\t\tfake=False):\n\t\t# Collect rollout and optionally update plr buffer\n\t\t# Returns train_batch and ued_scores\n\t\t# Perform rollout: @todo: pmap this\n\t\tif fake:\n\t\t\tdummy_eval_output = list(self.dummy_eval_output)\n\t\t\tdummy_eval_output[1] = train_state\n\t\t\treturn tuple(dummy_eval_output)\n\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\t\tobs, state, extra = self.benv.set_state(levels)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, 
dtype=jnp.bool_)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tstart_state = state\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry\n\t\t\t)\n\t\t)\n\n\t\t# Update PLR buffer\n\t\tif self.ued_score == UEDScore.MAX_MC:\n\t\t\tmax_returns = jax.vmap(lambda x,y: x.at[y].get())(train_state.plr_buffer.max_returns, level_idxs)\n\t\t\tmax_returns = jnp.where(\n\t\t\t\tjnp.greater_equal(level_idxs, 0),\n\t\t\t\tmax_returns,\n\t\t\t\tjnp.full_like(max_returns, -jnp.inf)\n\t\t\t)\n\t\t\tued_info = {'max_returns': max_returns}\n\t\telse:\n\t\t\tued_info = None\n\t\tued_scores, ued_score_info = compute_ued_scores(\n\t\t\tself.ued_score, train_batch, self.n_eval, info=ued_info, ignore_val=-jnp.inf, per_agent=True)\n\t\tnext_plr_buffer = self.plr_mgr.update(\n\t\t\ttrain_state.plr_buffer, \n\t\t\tlevels=levels, \n\t\t\tlevel_idxs=level_idxs, \n\t\t\tued_scores=ued_scores,\n\t\t\tdupe_mask=dupe_mask, \n\t\t\tinfo=ued_score_info, \n\t\t\tignore_val=-jnp.inf,\n\t\t\tparent_idxs=parent_idxs)\n\n\t\tnext_plr_buffer = jax.vmap(\n\t\t\tlambda update, new, prev: jax.tree_map(\n\t\t\t\tlambda x, y: jax.lax.select(update, x, y), new, prev)\n\t\t)(update_plr, next_plr_buffer, train_state.plr_buffer)\n\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\trollout_start_state,\n\t\t\ttrain_batch,\n\t\t\tued_scores,\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _mutate_levels(self, rng, levels, level_idxs, ued_scores=None):\n\t\tif not self.use_mutations:\n\t\t\treturn levels, level_idxs, jnp.full_like(level_idxs, -1)\n\n\t\tdef upsample_levels(levels, level_idxs, subsample_idxs):\n\t\t\tsubsample_idxs = subsample_idxs.repeat(self.n_parallel//self.mutation_subsample_size, -1)\n\t\t\tparent_idxs = level_idxs.take(subsample_idxs)\n\t\t\tlevels = jax.vmap(\n\t\t\t\tlambda x, y: jax.tree_map(lambda _x: jnp.array(_x).take(y, 0), x)\n\t\t\t)(levels, parent_idxs)\n\t\t\t\n\t\t\treturn levels, parent_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.BATCH:\n\t\t\tparent_idxs = level_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.EASY:\n\t\t\t_, top_level_idxs = jax.lax.approx_min_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\telif self.mutation_criterion == MutationCriterion.HARD:\n\t\t\t_, top_level_idxs = jax.lax.approx_max_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\tn_parallel = level_idxs.shape[-1]\n\t\tvrngs = jax.vmap(lambda subrng: jax.random.split(subrng, n_parallel))(\n\t\t\tjax.random.split(rng, self.n_students)\n\t\t)\n\n\t\tmutated_levels = jax.vmap(\n\t\t\tlambda *args: jax.vmap(self.mutation_fn, in_axes=(0,None,0,None))(*args),\n\t\t\tin_axes=(0,None,0,None)\n\t\t)(vrngs, self.benv.env_params, levels, 
self.n_mutations)\n\n\t\t# Mutated levels do not have existing idxs in the PLR buffer.\n\t\tmutated_level_idxs = jnp.full((self.n_students, n_parallel), -1)\n\n\t\treturn mutated_levels, mutated_level_idxs, parent_idxs\n\n\tdef _efficient_grad_update(self, rng, train_state, train_batch, is_replay):\n\t\t# PPOAgent vmaps over the train state and batch. Batch must be N x EM\n\t\tskip_grad_update = jnp.logical_and(self.use_robust_plr, ~is_replay)\n\n\t\tif self.n_students == 1:\n\t\t\ttrain_state, stats = jax.lax.cond(\n\t\t\t\tskip_grad_update[0],\n\t\t\t\tpartial(self.student_pop.update, fake=True),\n\t\t\t\tself.student_pop.update,\n\t\t\t\t*(rng, train_state, train_batch)\n\t\t\t)\n\t\telif self.n_students > 1: # Have to vmap all students + take only students that need updates\n\t\t\t_, dummy_stats = jax.vmap(lambda *_: self.student_pop.agent.get_empty_update_stats())(np.arange(self.n_students))\n\t\t\t_train_state, stats = self.student.update(rng, train_state, train_batch)\n\t\t\ttrain_state, stats = jax.vmap(lambda cond,x,y: \\\n\t\t\t\t\tjax.tree_map(lambda _cond,_x,_y: jax.lax.select(_cond,_x,_y), cond, x, y))(\n\t\t\t\t\t\tis_replay, (train_state, stats), (_train_state, dummy_stats)\n\t\t\t\t\t)\n\n\t\treturn train_state, stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None, plr_stats=None):\n\t\tstats = super()._compile_stats(update_stats, ep_stats, env_metrics)\n\n\t\tif plr_stats is not None:\n\t\t\tplr_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(plr_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_plr_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_plr_stats = jax.tree_util.tree_map(lambda x: x[i], plr_stats) # for agent0\n\t\t\t\t_plr_stats.update({f'{k}_a{i}':v for k,v in _student_plr_stats.items()})\n\t\t\tplr_stats = _plr_stats\n\t\telse:\n\t\t\tplr_stats = jax.tree_map(lambda x: x[0], plr_stats) \n\n\t\tstats.update({f'plr_{k}':v for k,v in plr_stats.items()})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None,\n\t\tplr_buffer=None):\n\t\t# If device sharded, load sharded PLR buffer into train state\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\t\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\n\t\t# Sample next training levels via PLR\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), self.n_parallel, 1)\n\n\t\tif self.use_parallel_eval:\n\t\t\tn_level_samples = self.n_parallel//self._n_parallel_batches\n\t\t\tnew_levels = jax.tree_map(lambda x: x.at[:,n_level_samples:2*n_level_samples].get(), state)\n\t\telse:\n\t\t\tn_level_samples = self.n_parallel\n\t\t\tnew_levels = state\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tlevels, level_idxs, is_replay, next_plr_buffer = \\\n\t\t\tself.plr_mgr.sample(subrng, train_state.plr_buffer, new_levels, n_level_samples)\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\t# If use_parallel_eval=True, need to combine replay and non-replay levels together\n\t\t# Need to mutate levels as well\n\t\tparent_idxs = jnp.full((self.n_students, self.n_parallel), -1)\n\t\tif self.use_parallel_eval: # Parallel 
ACCEL\n\t\t\tnew_level_idxs = jnp.full_like(parent_idxs, -1)\n\n\t\t\t_all_levels = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(state, levels)\n\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(new_level_idxs, level_idxs)\n\n\t\t\tif self.use_mutations:\n\t\t\t\trng, subrng = jax.random.split(rng)\n\t\t\t\tmutated_levels, mutated_level_idxs, _parent_idxs = self._mutate_levels(subrng, levels, level_idxs)\n\t\t\t\t\n\t\t\t\tfallback_levels = jax.tree_map(lambda x: x.at[:,-n_level_samples:].get(), state)\n\t\t\t\tfallback_level_idxs = jnp.full_like(mutated_level_idxs, -1)\n\n\t\t\t\tmutated_levels = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_levels, fallback_levels)\n\n\t\t\t\tmutated_level_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_level_idxs, fallback_level_idxs)\n\n\t\t\t\t_parent_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, _parent_idxs, fallback_level_idxs)\n\t\t\n\t\t\t\tmutated_levels_start_idx = 2*n_level_samples\n\t\t\t\t_all_levels = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_levels, mutated_levels)\n\t\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_level_idxs, mutated_level_idxs)\n\t\t\t\tparent_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(parent_idxs, _parent_idxs)\n\n\t\t\tlevels = _all_levels\n\t\t\tlevel_idxs = _all_level_idxs\n\n\t\t# dedupe levels, move into PLR buffer logic\n\t\tif self.force_unique:\n\t\t\tlevel_idxs, dupe_mask = self.plr_mgr.dedupe_levels(next_plr_buffer, levels, level_idxs)\n\t\telse:\n\t\t\tdupe_mask = None \n\n\t\t# Evaluate levels + update PLR\n\t\tresult = self._eval_and_update_plr(\n\t\t\trng, levels, level_idxs, train_state, update_plr=jnp.array([True]*self.n_students), parent_idxs=parent_idxs, dupe_mask=dupe_mask)\n\t\trng, train_state, state, start_state, obs, carry, extra, ep_stats, \\\n\t\t\trollout_start_state, train_batch, ued_scores = result\n\n\t\tif self.use_parallel_eval:\n\t\t\treplay_start_idx = self.n_eval*n_level_samples\n\t\t\treplay_end_idx = 2*replay_start_idx\n\t\t\ttrain_batch = jax.vmap(\n\t\t\t\tlambda x: jax.tree_map(\n\t\t\t\t\tlambda _x: _x.at[:,replay_start_idx:replay_end_idx].get(), x)\n\t\t\t\t)(train_batch)\n\n\t\t# Gradient update\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self._efficient_grad_update(subrng, train_state, train_batch, is_replay)\n\n\t\t# Mutation step\n\t\tuse_mutations = jnp.logical_and(self.use_mutations, is_replay)\n\t\tuse_mutations = jnp.logical_and(use_mutations, not self.use_parallel_eval) # Already mutated above in parallel\n\t\trng, arng, brng = jax.random.split(rng, 3)\n\n\t\tmutated_levels, mutated_level_idxs, parent_idxs = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._mutate_levels,\n\t\t\tlambda *_: (levels, level_idxs, jnp.full_like(level_idxs, 
-1)),\n\t\t\t*(arng, levels, level_idxs, ued_scores)\n\t\t)\n\n\t\tmutated_dupe_mask = jnp.zeros_like(mutated_level_idxs, dtype=jnp.bool_)\n\t\tif self.force_unique: # Should move into update plr logic\n\t\t\tmutated_level_idxs, mutated_dupe_mask = jax.lax.cond(\n\t\t\t\tuse_mutations.any(),\n\t\t\t\tself.plr_mgr.dedupe_levels,\n\t\t\t\tlambda *_: (mutated_level_idxs, mutated_dupe_mask),\n\t\t\t\t*(next_plr_buffer, mutated_levels, mutated_level_idxs)\n\t\t\t)\n\n\t\tmutation_eval_result = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._eval_and_update_plr,\n\t\t\tpartial(self._eval_and_update_plr, fake=True),\n\t\t\t*(brng, mutated_levels, mutated_level_idxs, train_state, use_mutations, parent_idxs, mutated_dupe_mask)\n\t\t)\n\t\ttrain_state = mutation_eval_result[1]\n\n\t\t# Collect training env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(levels)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tplr_stats = self.plr_mgr.get_metrics(train_state.plr_buffer)\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics, plr_stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tplr_buffer = train_state.plr_buffer\n\t\t\ttrain_state = train_state.replace(plr_buffer=None)\n\n\t\ttrain_state = train_state.increment()\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tplr_buffer\n\t\t)" } ]
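Editor's note: the runner snippet above builds its PPO optimizer by negating a linear learning-rate schedule, because optax transforms *add* updates to parameters; scaling the Adam direction by a negative schedule yields gradient descent. A minimal, self-contained sketch of that idiom follows. The hyperparameter values are placeholders (the runner reads the real ones from its config); only public optax API calls are used.

import jax
import jax.numpy as jnp
import optax

# Placeholder hyperparameters; the runner takes these from its config.
lr_init, lr_final, lr_anneal_steps = 3e-4, 3e-5, 10_000
max_grad_norm, adam_eps = 0.5, 1e-5

# optax applies updates additively: params <- params + update.
# Negating the schedule turns the Adam direction into descent steps.
schedule_fn = optax.linear_schedule(
    init_value=-float(lr_init),
    end_value=-float(lr_final),
    transition_steps=lr_anneal_steps,
)

tx = optax.chain(
    optax.clip_by_global_norm(max_grad_norm),  # clip first
    optax.scale_by_adam(eps=adam_eps),         # then Adam preconditioning
    optax.scale_by_schedule(schedule_fn),      # then the (negative) step size
)

# Tiny end-to-end check of the transform chain.
params = {"w": jnp.ones((3,))}
opt_state = tx.init(params)
grads = jax.grad(lambda p: jnp.sum(p["w"] ** 2))(params)
updates, opt_state = tx.update(grads, opt_state, params)
params = optax.apply_updates(params, updates)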
import copy
import time
import numpy as np
import jax
import minimax.envs as envs
import minimax.models as models
import minimax.agents as agents
from functools import partial
from collections import defaultdict
from jax.sharding import Mesh, PartitionSpec as P
from jax.experimental import mesh_utils
from jax.experimental.shard_map import shard_map
from .eval_runner import EvalRunner
from .dr_runner import DRRunner
from .paired_runner import PAIREDRunner
from .plr_runner import PLRRunner
from minimax.util.rl import UEDScore, PopPLRManager
16,361
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo(
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo(
runner_cls=DRRunner,
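Editor's note: the next_line field above continues the RUNNER_INFO registry that cropped_code leaves open at `'dr': RunnerInfo(`. Only the `'dr'` entry is confirmed by this record; the sketch below is a hedged, plausible completion inferred from the runner classes in this record's import_statement and the `is_ued` flag on RunnerInfo. The `'plr'` and `'paired'` entries are assumptions, not quoted source.

# Hypothetical completion of the registry opened in cropped_code.
# Assumes RunnerInfo and the runner classes imported above are in scope.
# Only the 'dr' entry is confirmed by next_line.
RUNNER_INFO = {
	'dr': RunnerInfo(
		runner_cls=DRRunner,
		is_ued=False,
	),
	'plr': RunnerInfo(
		runner_cls=PLRRunner,
		is_ued=False,
	),
	'paired': RunnerInfo(
		runner_cls=PAIREDRunner,
		is_ued=True,  # PAIRED trains a UED teacher alongside the students
	),
}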
1
2023-10-28 12:12:01+00:00
24k
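Editor's note: the `24k` level field closes the first record. As a reading aid for the schema used throughout this dump, here is a minimal sketch of how one such row can be turned into a next-line completion example. It assumes rows are serialized as JSON Lines with the field names shown in the schema header (context, import_statement, cropped_code, next_line, gold_snippet_index); the file path and helper name are hypothetical.

import json

def build_completion_example(row):
    # Hypothetical helper: each context entry carries an identifier,
    # its source path, and a code snippet, per the schema header.
    context_blocks = [
        "# {} ({})\n{}".format(c["path"], c["identifier"], c["snippet"])
        for c in row["context"]
    ]
    prompt = "\n\n".join(
        context_blocks + [row["import_statement"], row["cropped_code"]]
    )
    # The target is the single line completing cropped_code;
    # row["gold_snippet_index"] marks which context entry holds the
    # information needed to predict it.
    return prompt, row["next_line"]

# Usage (the data path is a placeholder):
with open("data.jsonl") as f:
    row = json.loads(f.readline())
    prompt, target = build_completion_example(row)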
nv-tlabs/vid2player3d
poselib/poselib/skeleton/tests/test_skeleton.py
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\"\n )\n\n def __init__(self, node_names, parent_indices, local_translation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n def __repr__(self):\n return (\n \"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n )\n )\n\n def 
_indent(self, s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args, **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args, **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict(\n [\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n ]\n )\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n new_local_translation = torch.zeros(\n new_length, 3, dtype=self.local_translation.dtype\n )\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n 
while tb_node_index != -1 and self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[\n tb_node_index, node_index, :\n ]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[\n self[tb_node_index]\n ]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... )\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. 
from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., : self.num_joints * 4].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 4))\n )\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? \n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[\n ..., self.num_joints * 4 : self.num_joints * 4 + 3\n ]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(\n local_transformation[..., node_index, :]\n )\n else:\n global_transformation.append(\n transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n )\n )\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not 
hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(\n self.global_transformation\n )\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[\n ..., node_index, :\n ]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(\n r=self.local_rotation, t=self.local_translation\n )\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. 
The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (\n tuple(self.tensor.shape[:-1])\n + (len(self.skeleton_tree),)\n + tuple(self.skeleton_tree.local_translation.shape[-1:])\n )\n local_translation = self.skeleton_tree.local_translation.broadcast_to(\n *broadcast_shape\n ).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (\n self.global_translation - self.root_translation.unsqueeze(-1)\n )\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (\n quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation\n )\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (\n global_positions[:, left_shoulder_index].numpy()\n - global_positions[:, right_shoulder_index].numpy()\n + global_positions[:, left_hip_index].numpy()\n - global_positions[:, right_hip_index].numpy()\n )\n side_direction = (\n side_direction\n / np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(\n forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\"\n )\n forward_direction = (\n forward_direction\n / np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(\n *(state_shape + (-1,))\n )\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonState\":\n rot = 
TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ]\n )\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (\n r.dim() > 0\n ), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n return cls(\n SkeletonState._to_state_vector(r, t),\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. 
\n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (\n transform_translation(transform_mul(p1, p2))\n .reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)\n .mean(axis=0)\n )\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(\n node_names, pairwise_translation\n )\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(\n self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree\n ):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(\n list(joint_mapping_inv)\n )\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (\n len(set(n_joints)) == 1\n ), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(\n map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n )\n )\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(\n node_names, pairwise_translation\n )\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]\n )\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]\n )\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (\n source_state.root_translation - source_tpose.root_translation\n ) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state 
relative to source tpose and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[\n current_index, :\n ] = target_tpose.global_rotation[\n target_tpose.skeleton_tree.index(name), :\n ]\n\n global_rotation_diff = quat_mul_norm(\n source_state.global_rotation, quat_inverse(source_tpose.global_rotation)\n )\n new_global_rotation = quat_mul_norm(\n global_rotation_diff, target_tpose_global_rotation\n )\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[\n :, parent_index, :\n ]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (\n len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0\n ), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(\n self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps\n )\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(\n cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int\n ):\n \"\"\"\n Construct a skeleton motion from a 
skeleton state. The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n assert (\n type(skeleton_state) == SkeletonState\n ), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n global_velocity = SkeletonMotion._compute_velocity(\n p=skeleton_state.global_translation, time_delta=1 / fps\n )\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(\n r=skeleton_state.global_rotation, time_delta=1 / fps\n )\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(\n dict_repr[\"global_angular_velocity\"], *args, **kwargs\n )\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(\n dict_repr[\"skeleton_tree\"], *args, **kwargs\n ),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ]\n )\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(\n fbx_file_path, fbx_configs, root_joint, fps\n )\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(\n transformation_matrix=torch.from_numpy(\n np.swapaxes(np.array(transforms), -1, -2),\n ).float()\n )\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(\n -1, len(joint_parents), 3\n )[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree, r=local_rotation, t=root_translation, is_local=True\n )\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(\n skeleton_state=skeleton_state, fps=fps\n )\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode=\"nearest\"\n )\n / time_delta,\n )\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(\n r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])\n )\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n angular_velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"\n ),\n )\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\n \"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps)\n )\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "plot_skeleton_state", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_state(skeleton_state, task_name=\"\"):\n \"\"\"\n Visualize a skeleton state\n\n :param skeleton_state:\n :param task_name:\n :type skeleton_state: SkeletonState\n :type task_name: string, optional\n \"\"\"\n logger.info(\"plotting {}\".format(task_name))\n task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)\n plotter = Matplotlib3DPlotter(task)\n plotter.show()" }, { "identifier": "plot_skeleton_motion_interactive", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_motion_interactive(skeleton_motion, task_name=\"\"):\n \"\"\"\n Visualize a skeleton motion along its first dimension interactively.\n\n :param skeleton_motion:\n :param task_name:\n :type skeleton_motion: SkeletonMotion\n :type task_name: string, optional\n \"\"\"\n for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):\n pass" }, { "identifier": "Matplotlib3DPlotter", "path": "poselib/poselib/visualization/plt_plotter.py", "snippet": "class Matplotlib3DPlotter(BasePlotter):\n _fig: plt.figure # plt figure\n _ax: p3.Axes3D # plt 3d axis\n # stores artist objects for each task (task name as the key)\n _artist_cache: Dict[str, Any]\n # callables for each task primitives\n 
_create_impl_callables: Dict[str, Callable]\n _update_impl_callables: Dict[str, Callable]\n\n def __init__(self, task: \"BasePlotterTask\") -> None:\n self._fig = plt.figure()\n self._ax = p3.Axes3D(self._fig)\n self._artist_cache = {}\n\n self._create_impl_callables = {\n \"Draw3DLines\": self._lines_create_impl,\n \"Draw3DDots\": self._dots_create_impl,\n \"Draw3DTrail\": self._trail_create_impl,\n }\n self._update_impl_callables = {\n \"Draw3DLines\": self._lines_update_impl,\n \"Draw3DDots\": self._dots_update_impl,\n \"Draw3DTrail\": self._trail_update_impl,\n }\n self._init_lim()\n super().__init__(task)\n\n @property\n def ax(self):\n return self._ax\n\n @property\n def fig(self):\n return self._fig\n\n def show(self):\n plt.show()\n\n def _min(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return min(x, y)\n\n def _max(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return max(x, y)\n\n def _init_lim(self):\n self._curr_x_min = None\n self._curr_y_min = None\n self._curr_z_min = None\n self._curr_x_max = None\n self._curr_y_max = None\n self._curr_z_max = None\n\n def _update_lim(self, xs, ys, zs):\n self._curr_x_min = self._min(np.min(xs), self._curr_x_min)\n self._curr_y_min = self._min(np.min(ys), self._curr_y_min)\n self._curr_z_min = self._min(np.min(zs), self._curr_z_min)\n self._curr_x_max = self._max(np.max(xs), self._curr_x_max)\n self._curr_y_max = self._max(np.max(ys), self._curr_y_max)\n self._curr_z_max = self._max(np.max(zs), self._curr_z_max)\n\n def _set_lim(self):\n if not (\n self._curr_x_min is None\n or self._curr_x_max is None\n or self._curr_y_min is None\n or self._curr_y_max is None\n or self._curr_z_min is None\n or self._curr_z_max is None\n ):\n self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)\n self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)\n self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)\n self._init_lim()\n\n @staticmethod\n def _lines_extract_xyz_impl(index, lines_task):\n return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]\n\n @staticmethod\n def _trail_extract_xyz_impl(index, trail_task):\n return (\n trail_task[index : index + 2, 0],\n trail_task[index : index + 2, 1],\n trail_task[index : index + 2, 2],\n )\n\n def _lines_create_impl(self, lines_task):\n color = lines_task.color\n self._artist_cache[lines_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),\n color=color,\n linewidth=lines_task.line_width,\n alpha=lines_task.alpha\n )[0]\n for i in range(len(lines_task))\n ]\n\n def _lines_update_impl(self, lines_task):\n lines_artists = self._artist_cache[lines_task.task_name]\n for i in range(len(lines_task)):\n artist = lines_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if lines_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _dots_create_impl(self, dots_task):\n color = dots_task.color\n self._artist_cache[dots_task.task_name] = self._ax.plot(\n dots_task[:, 0],\n dots_task[:, 1],\n dots_task[:, 2],\n c=color,\n linestyle=\"\",\n marker=\".\",\n markersize=dots_task.marker_size,\n alpha=dots_task.alpha,\n )[0]\n\n def _dots_update_impl(self, dots_task):\n dots_artist = self._artist_cache[dots_task.task_name]\n dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])\n dots_artist.set_3d_properties(dots_task[:, 2])\n if dots_task.influence_lim:\n self._update_lim(dots_task[:, 0], dots_task[:, 1], 
dots_task[:, 2])\n\n def _trail_create_impl(self, trail_task):\n color = trail_task.color\n trail_length = len(trail_task) - 1\n self._artist_cache[trail_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),\n color=trail_task.color,\n linewidth=trail_task.line_width,\n alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))\n )[0]\n for i in range(trail_length)\n ]\n\n def _trail_update_impl(self, trail_task):\n trails_artists = self._artist_cache[trail_task.task_name]\n for i in range(len(trail_task) - 1):\n artist = trails_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if trail_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _create_impl(self, task_list):\n for task in task_list:\n self._create_impl_callables[task.task_type](task)\n self._draw()\n\n def _update_impl(self, task_list):\n for task in task_list:\n self._update_impl_callables[task.task_type](task)\n self._draw()\n\n def _set_aspect_equal_3d(self):\n xlim = self._ax.get_xlim3d()\n ylim = self._ax.get_ylim3d()\n zlim = self._ax.get_zlim3d()\n\n xmean = np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n\n plot_radius = max(\n [\n abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))\n for lim in lims\n ]\n )\n\n self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n def _draw(self):\n self._set_lim()\n self._set_aspect_equal_3d()\n self._fig.canvas.draw()\n self._fig.canvas.flush_events()\n plt.pause(0.00001)" }, { "identifier": "Draw3DSkeletonMotion", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonMotion(BasePlotterTask):\n def __init__(\n self,\n task_name: str,\n skeleton_motion,\n frame_index=None,\n joints_color=\"red\",\n lines_color=\"blue\",\n velocity_color=\"green\",\n angular_velocity_color=\"purple\",\n trail_color=\"black\",\n trail_length=10,\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonMotion\")\n self._trail_length = trail_length\n self._skeleton_motion = skeleton_motion\n # if frame_index is None:\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]\n # else:\n # curr_skeleton_motion = self._skeleton_motion[frame_index, :]\n self._skeleton_state_task = Draw3DSkeletonState(\n self.get_scoped_name(\"skeleton_state\"),\n curr_skeleton_motion,\n joints_color=joints_color,\n lines_color=lines_color,\n alpha=alpha,\n )\n vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(\n curr_skeleton_motion\n )\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(trail_length, axis=0)\n self._vel_task = Draw3DLines(\n self.get_scoped_name(\"velocity\"),\n vel_lines,\n velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._avel_task = Draw3DLines(\n self.get_scoped_name(\"angular_velocity\"),\n avel_lines,\n angular_velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._com_trail_task = Draw3DTrail(\n self.get_scoped_name(\"com_trail\"),\n self._com_pos,\n trail_color,\n marker_size=2,\n influence_lim=True,\n alpha=alpha,\n )\n\n @property\n def name(self):\n return \"3DSkeletonMotion\"\n\n def update(self, 
frame_index=None, reset_trail=False, skeleton_motion=None) -> None:\n if skeleton_motion is not None:\n self._skeleton_motion = skeleton_motion\n\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]\n if reset_trail:\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(self._trail_length, axis=0)\n else:\n self._com_pos = np.concatenate(\n (\n curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],\n self._com_pos[:-1],\n ),\n axis=0,\n )\n self._skeleton_state_task.update(curr_skeleton_motion)\n self._com_trail_task.update(self._com_pos)\n self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))\n\n @staticmethod\n def _get_vel_and_avel(skeleton_motion):\n \"\"\"Get all the velocity and angular velocity lines\n \"\"\"\n pos = skeleton_motion.global_translation.numpy()\n vel = skeleton_motion.global_velocity.numpy()\n avel = skeleton_motion.global_angular_velocity.numpy()\n\n vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)\n avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)\n return vel_lines, avel_lines\n\n def _update(self, vel_lines, avel_lines) -> None:\n self._vel_task.update(vel_lines)\n self._avel_task.update(avel_lines)\n\n def __iter__(self):\n yield from self._skeleton_state_task\n yield from self._vel_task\n yield from self._avel_task\n yield from self._com_trail_task" }, { "identifier": "Draw3DSkeletonState", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonState(BasePlotterTask):\n _lines_task: Draw3DLines # sub-task for drawing lines\n _dots_task: Draw3DDots # sub-task for drawing dots\n\n def __init__(\n self,\n task_name: str,\n skeleton_state,\n joints_color: str = \"red\",\n lines_color: str = \"blue\",\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonState\")\n lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)\n self._lines_task = Draw3DLines(\n self.get_scoped_name(\"bodies\"), lines, joints_color, alpha=alpha\n )\n self._dots_task = Draw3DDots(\n self.get_scoped_name(\"joints\"), dots, lines_color, alpha=alpha\n )\n\n @property\n def name(self):\n return \"3DSkeleton\"\n\n def update(self, skeleton_state) -> None:\n self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))\n\n @staticmethod\n def _get_lines_and_dots(skeleton_state):\n \"\"\"Get all the lines and dots needed to draw the skeleton state\n \"\"\"\n assert (\n len(skeleton_state.tensor.shape) == 1\n ), \"the state has to be zero dimensional\"\n dots = skeleton_state.global_translation.numpy()\n skeleton_tree = skeleton_state.skeleton_tree\n parent_indices = skeleton_tree.parent_indices.numpy()\n lines = []\n for node_index in range(len(skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index != -1:\n lines.append([dots[node_index], dots[parent_index]])\n lines = np.array(lines)\n return lines, dots\n\n def _update(self, lines, dots) -> None:\n self._lines_task.update(lines)\n self._dots_task.update(dots)\n\n def __iter__(self):\n yield from self._lines_task\n yield from self._dots_task" } ]
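In the `retarget_to` snippet above, joints that exist in the target skeleton but have no counterpart in the source tree inherit the rotation of their nearest mapped ancestor (the `while name not in current_skeleton_tree` loop). A toy sketch of that ancestor-fallback lookup, using a hypothetical parent table in place of `SkeletonTree` (all joint names here are illustrative):

# hypothetical child -> parent table; None marks the root
parents = {"hand": "elbow", "elbow": "shoulder", "shoulder": "spine", "spine": None}

def nearest_mapped_ancestor(name, known_joints):
    # walk up the target tree until we reach a joint the source tree also has
    while name is not None and name not in known_joints:
        name = parents[name]
    return name

print(nearest_mapped_ancestor("hand", {"shoulder", "spine"}))  # -> "shoulder"

The fallback keeps unmapped end joints rigid relative to their mapped ancestor, which is exactly the rough approximation the docstring warns about.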
from ...core import * from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion from ...visualization.common import ( plot_skeleton_state, plot_skeleton_motion_interactive, ) from ...visualization.plt_plotter import Matplotlib3DPlotter from ...visualization.skeleton_plotter_tasks import ( Draw3DSkeletonMotion, Draw3DSkeletonState, ) import numpy as np import torch
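`SkeletonMotion._compute_angular_velocity` in the context above estimates per-joint angular velocity by composing each frame's rotation with the inverse of the previous frame's, converting that relative rotation to angle-axis form, dividing by the frame time, and Gaussian-smoothing along the time axis. A minimal NumPy sketch of the same estimate, assuming xyzw quaternion layout and time on the first axis (the function names and shapes here are illustrative, not poselib's API):

import numpy as np
from scipy.ndimage import gaussian_filter1d

def quat_inverse(q):
    # the inverse of a unit quaternion (xyzw) is its conjugate
    return q * np.array([-1.0, -1.0, -1.0, 1.0])

def quat_mul(a, b):
    ax, ay, az, aw = np.moveaxis(a, -1, 0)
    bx, by, bz, bw = np.moveaxis(b, -1, 0)
    return np.stack(
        [
            aw * bx + ax * bw + ay * bz - az * by,
            aw * by - ax * bz + ay * bw + az * bx,
            aw * bz + ax * by - ay * bx + az * bw,
            aw * bw - ax * bx - ay * by - az * bz,
        ],
        axis=-1,
    )

def estimate_angular_velocity(r, fps):
    # r: (T, J, 4) global rotations over T frames; returns (T-1, J, 3) in rad/s
    diff = quat_mul(r[1:], quat_inverse(r[:-1]))  # frame-to-frame relative rotation
    half_angle_cos = np.clip(diff[..., 3], -1.0, 1.0)
    angle = 2.0 * np.arccos(half_angle_cos)  # rotation angle per step
    axis = diff[..., :3]
    axis = axis / (np.linalg.norm(axis, axis=-1, keepdims=True) + 1e-9)
    avel = axis * angle[..., None] * fps  # dividing by time_delta = 1 / fps
    # the original helper applies the same Gaussian smoothing along time
    return gaussian_filter1d(avel, sigma=2, axis=0, mode="nearest")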
18606
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True ) plot_skeleton_motion_interactive(skel_motion) def test_grad(): source_motion = SkeletonMotion.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\motions\\JogFlatTerrain_01_ase.npy", backend="pytorch", device="cuda:0", ) source_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\fox_tpose.npy", backend="pytorch", device="cuda:0", ) target_tpose = SkeletonState.from_file( "c:\\Users\\bmatusch\\carbmimic\\data\\skeletons\\flex_tpose.npy", backend="pytorch", device="cuda:0", ) target_skeleton_tree = target_tpose.skeleton_tree joint_mapping = { "upArm_r": "right_shoulder", "upArm_l": "left_shoulder", "loArm_r": "right_elbow", "loArm_l": "left_elbow", "upLeg_r": "right_hip", "upLeg_l": "left_hip", "loLeg_r": "right_knee", "loLeg_l": "left_knee", "foot_r": "right_ankle", "foot_l": "left_ankle", "hips": "pelvis", "neckA": "neck", "spineA": "abdomen", } rotation_to_target_skeleton = quat_from_angle_axis( angle=torch.tensor(90.0).float(), axis=torch.tensor([1, 0, 0]).float(), degree=True, ) target_motion = source_motion.retarget_to( joint_mapping=joint_mapping, source_tpose_local_rotation=source_tpose.local_rotation, source_tpose_root_translation=source_tpose.root_translation, target_skeleton_tree=target_skeleton_tree, target_tpose_local_rotation=target_tpose.local_rotation, target_tpose_root_translation=target_tpose.root_translation, rotation_to_target_skeleton=rotation_to_target_skeleton, scale_to_target_skeleton=0.01, ) target_state = SkeletonState( target_motion.tensor[800, :], target_motion.skeleton_tree, target_motion.is_local, ) skeleton_tree = target_state.skeleton_tree root_translation = target_state.root_translation global_translation = target_state.global_translation q = np.zeros((len(skeleton_tree), 4), dtype=np.float32) q[..., 3] = 1.0 q = torch.from_numpy(q) max_its = 10000
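The test above builds `rotation_to_target_skeleton` with `quat_from_angle_axis(90 degrees, x-axis)`. A small worked example of that quaternion under the xyzw convention, q = (n sin(theta/2), cos(theta/2)) for angle theta about unit axis n (the y-up/z-up reading below is an assumption about these particular skeletons):

import math

theta = math.radians(90.0)
q = (math.sin(theta / 2.0), 0.0, 0.0, math.cos(theta / 2.0))
print(q)  # ~ (0.7071, 0.0, 0.0, 0.7071)
# A +90 degree rotation about +x sends y to z, so a y-up source pose is
# re-expressed in a z-up target frame before retargeting.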
task = Draw3DSkeletonState(task_name="", skeleton_state=target_state)
7
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
MSTH/SpaceTimeHashing/permute_field.py
[ { "identifier": "RaySamples", "path": "nerfstudio/cameras/rays.py", "snippet": "class RaySamples(TensorDataclass):\n \"\"\"Samples along a ray\"\"\"\n\n frustums: Frustums\n \"\"\"Frustums along ray.\"\"\"\n camera_indices: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"Camera index.\"\"\"\n deltas: Optional[TensorType[\"bs\":..., 1]] = None\n \"\"\"\"width\" of each sample.\"\"\"\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None\n \"\"\"Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling.\"\"\"\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None\n \"\"\"Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling.\"\"\"\n spacing_to_euclidean_fn: Optional[Callable] = None\n \"\"\"Function to convert bins to euclidean distance.\"\"\"\n metadata: Optional[Dict[str, TensorType[\"bs\":..., \"latent_dims\"]]] = None\n \"\"\"additional information relevant to generating ray samples\"\"\"\n\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def get_transmittance(self, densities: TensorType[..., \"num_samples\", 1]) -> TensorType[..., \"num_samples\", 1]:\n \"\"\"Return weights based on predicted densities\n\n Args:\n densities: Predicted densities for samples along ray\n\n Returns:\n Weights for each sample\n \"\"\"\n\n delta_density = self.deltas * densities\n alphas = 1 - torch.exp(-delta_density)\n\n transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)\n transmittance = torch.cat(\n [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2\n )\n transmittance = torch.exp(-transmittance) # [..., \"num_samples\"]\n transmittance = torch.nan_to_num(transmittance)\n\n return transmittance\n\n def get_weights(self, densities: TensorType[..., \"num_samples\", 1]) -> TensorType[..., \"num_samples\", 1]:\n \"\"\"Return weights based on predicted densities\n\n Args:\n densities: Predicted densities for samples along ray\n\n Returns:\n Weights for each sample\n \"\"\"\n\n delta_density = self.deltas * densities\n alphas = 1 - torch.exp(-delta_density)\n\n transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)\n transmittance = torch.cat(\n [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2\n )\n transmittance = torch.exp(-transmittance) # [..., \"num_samples\"]\n\n weights = alphas * transmittance # [..., \"num_samples\"]\n weights = torch.nan_to_num(weights)\n\n return weights" }, { "identifier": "Frustums", "path": "nerfstudio/cameras/rays.py", "snippet": "class Frustums(TensorDataclass):\n \"\"\"Describes region of space as a frustum.\"\"\"\n\n origins: TensorType[\"bs\":..., 3]\n \"\"\"xyz coordinate for ray origin.\"\"\"\n directions: TensorType[\"bs\":..., 3]\n \"\"\"Direction of ray.\"\"\"\n starts: TensorType[\"bs\":..., 1]\n \"\"\"Where the frustum starts along a ray.\"\"\"\n ends: TensorType[\"bs\":..., 1]\n \"\"\"Where the frustum ends along a ray.\"\"\"\n pixel_area: TensorType[\"bs\":..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin.\"\"\"\n offsets: Optional[TensorType[\"bs\":..., 3]] = None\n \"\"\"Offsets for each sample position\"\"\"\n\n def get_positions(self) -> TensorType[..., 3]:\n \"\"\"Calculates \"center\" position of frustum. 
Not weighted by mass.\n\n Returns:\n xyz positions.\n \"\"\"\n pos = self.origins + self.directions * (self.starts + self.ends) / 2\n if self.offsets is not None:\n pos = pos + self.offsets\n return pos\n\n def set_offsets(self, offsets):\n \"\"\"Sets offsets for this frustum for computing positions\"\"\"\n self.offsets = offsets\n\n def get_gaussian_blob(self) -> Gaussians:\n \"\"\"Calculates guassian approximation of conical frustum.\n\n Returns:\n Conical frustums approximated by gaussian distribution.\n \"\"\"\n # Cone radius is set such that the square pixel_area matches the cone area.\n cone_radius = torch.sqrt(self.pixel_area) / 1.7724538509055159 # r = sqrt(pixel_area / pi)\n if self.offsets is not None:\n raise NotImplementedError()\n return conical_frustum_to_gaussian(\n origins=self.origins,\n directions=self.directions,\n starts=self.starts,\n ends=self.ends,\n radius=cone_radius,\n )\n\n @classmethod\n def get_mock_frustum(cls, device: Optional[TORCH_DEVICE] = \"cpu\") -> \"Frustums\":\n \"\"\"Helper function to generate a placeholder frustum.\n\n Returns:\n A size 1 frustum with meaningless values.\n \"\"\"\n return Frustums(\n origins=torch.ones((1, 3)).to(device),\n directions=torch.ones((1, 3)).to(device),\n starts=torch.ones((1, 1)).to(device),\n ends=torch.ones((1, 1)).to(device),\n pixel_area=torch.ones((1, 1)).to(device),\n )" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self) -> int:\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indices.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n 
bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin.\n bin_ends: Distance from origin to end of bin.\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n )\n\n return ray_samples" }, { "identifier": "SceneBox", "path": "nerfstudio/data/scene_box.py", "snippet": "class SceneBox:\n \"\"\"Data to represent the scene box.\"\"\"\n\n aabb: TensorType[2, 3] = None\n \"\"\"aabb: axis-aligned bounding box.\n aabb[0] is the minimum (x,y,z) point.\n aabb[1] is the maximum (x,y,z) point.\"\"\"\n\n def get_diagonal_length(self):\n \"\"\"Returns the longest diagonal length.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n length = torch.sqrt((diff**2).sum() + 1e-20)\n return length\n\n def get_center(self):\n \"\"\"Returns the center of the box.\"\"\"\n diff = self.aabb[1] - self.aabb[0]\n return self.aabb[0] + diff / 2.0\n\n def get_centered_and_scaled_scene_box(self, scale_factor: Union[float, torch.Tensor] = 1.0):\n \"\"\"Returns a new box that has been shifted and rescaled to be centered\n about the origin.\n\n Args:\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n return SceneBox(aabb=(self.aabb - self.get_center()) * scale_factor)\n\n @staticmethod\n def get_normalized_positions(positions: TensorType[..., 3], aabb: TensorType[2, 3]):\n \"\"\"Return normalized positions in range [0, 1] based on the aabb axis-aligned bounding box.\n\n Args:\n positions: the xyz positions\n aabb: the axis-aligned bounding box\n \"\"\"\n aabb_lengths = aabb[1] - aabb[0]\n normalized_positions = (positions - aabb[0]) / aabb_lengths\n return normalized_positions\n\n def to_json(self) -> Dict:\n \"\"\"Returns a json object from the Python object.\"\"\"\n return {\"type\": \"aabb\", \"min_point\": self.aabb[0].tolist(), \"max_point\": self.aabb[1].tolist()}\n\n @staticmethod\n def from_json(json_: Dict) -> \"SceneBox\":\n \"\"\"Returns the an instance of SceneBox from a json dictionary.\n\n Args:\n json_: the json dictionary containing scene box information\n \"\"\"\n assert json_[\"type\"] == \"aabb\"\n aabb = torch.tensor([json_[0], json_[1]])\n return SceneBox(aabb=aabb)\n\n @staticmethod\n def from_camera_poses(poses: TensorType[..., 3, 4], scale_factor: float) -> \"SceneBox\":\n \"\"\"Returns the 
instance of SceneBox that fully envelopes a set of poses\n\n Args:\n poses: tensor of camera pose matrices\n scale_factor: How much to scale the camera origins by.\n \"\"\"\n xyzs = poses[..., :3, -1]\n aabb = torch.stack([torch.min(xyzs, dim=0)[0], torch.max(xyzs, dim=0)[0]])\n return SceneBox(aabb=aabb * scale_factor)" }, { "identifier": "trunc_exp", "path": "nerfstudio/field_components/activations.py", "snippet": "class _TruncExp(Function): # pylint: disable=abstract-method\n def forward(ctx, x): # pylint: disable=arguments-differ\n def backward(ctx, g): # pylint: disable=arguments-differ" }, { "identifier": "Embedding", "path": "nerfstudio/field_components/embedding.py", "snippet": "class Embedding(FieldComponent):\n \"\"\"Index into embeddings.\n # TODO: add different types of initializations\n\n Args:\n in_dim: Number of embeddings\n out_dim: Dimension of the embedding vectors\n \"\"\"\n\n def __init__(self, in_dim: int, out_dim: int) -> None:\n super().__init__()\n self.in_dim = in_dim\n self.out_dim = out_dim\n self.build_nn_modules()\n\n def build_nn_modules(self) -> None:\n self.embedding = torch.nn.Embedding(self.in_dim, self.out_dim)\n\n def mean(self, dim=0):\n \"\"\"Return the mean of the embedding weights along a dim.\"\"\"\n return self.embedding.weight.mean(dim)\n\n def forward(self, in_tensor: TensorType[..., \"input_dim\"]) -> TensorType[..., \"output_dim\"]:\n \"\"\"Call forward\n\n Args:\n in_tensor: input tensor to process\n \"\"\"\n return self.embedding(in_tensor)" }, { "identifier": "FieldHeadNames", "path": "nerfstudio/field_components/field_heads.py", "snippet": "class FieldHeadNames(Enum):\n \"\"\"Possible field outputs\"\"\"\n\n RGB = \"rgb\"\n SH = \"sh\"\n DENSITY = \"density\"\n NORMALS = \"normals\"\n PRED_NORMALS = \"pred_normals\"\n UNCERTAINTY = \"uncertainty\"\n TRANSIENT_RGB = \"transient_rgb\"\n TRANSIENT_DENSITY = \"transient_density\"\n SEMANTICS = \"semantics\"" }, { "identifier": "Field", "path": "nerfstudio/fields/base_field.py", "snippet": "class Field(nn.Module):\n \"\"\"Base class for fields.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._sample_locations = None\n self._density_before_activation = None\n\n def density_fn(self, positions: TensorType[\"bs\":..., 3]) -> TensorType[\"bs\":..., 1]:\n \"\"\"Returns only the density. Used primarily with the density grid.\n\n Args:\n positions: the origin of the samples/frustums\n \"\"\"\n # Need to figure out a better way to describe positions with a ray.\n ray_samples = RaySamples(\n frustums=Frustums(\n origins=positions,\n directions=torch.ones_like(positions),\n starts=torch.zeros_like(positions[..., :1]),\n ends=torch.zeros_like(positions[..., :1]),\n pixel_area=torch.ones_like(positions[..., :1]),\n )\n )\n density, _ = self.get_density(ray_samples)\n return density\n\n @abstractmethod\n def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType[..., 1], TensorType[..., \"num_features\"]]:\n \"\"\"Computes and returns the densities. 
Returns a tensor of densities and a tensor of features.\n\n Args:\n ray_samples: Samples locations to compute density.\n \"\"\"\n\n def get_normals(self) -> TensorType[..., 3]:\n \"\"\"Computes and returns a tensor of normals.\n\n Args:\n density: Tensor of densities.\n \"\"\"\n assert self._sample_locations is not None, \"Sample locations must be set before calling get_normals.\"\n assert self._density_before_activation is not None, \"Density must be set before calling get_normals.\"\n assert (\n self._sample_locations.shape[:-1] == self._density_before_activation.shape[:-1]\n ), \"Sample locations and density must have the same shape besides the last dimension.\"\n\n normals = torch.autograd.grad(\n self._density_before_activation,\n self._sample_locations,\n grad_outputs=torch.ones_like(self._density_before_activation),\n retain_graph=True,\n )[0]\n\n normals = -torch.nn.functional.normalize(normals, dim=-1)\n\n return normals\n\n @abstractmethod\n def get_outputs(\n self, ray_samples: RaySamples, density_embedding: Optional[TensorType] = None\n ) -> Dict[FieldHeadNames, TensorType]:\n \"\"\"Computes and returns the colors. Returns output field values.\n\n Args:\n ray_samples: Samples locations to compute outputs.\n density_embedding: Density embeddings to condition on.\n \"\"\"\n\n def forward(self, ray_samples: RaySamples, compute_normals: bool = False) -> Dict[FieldHeadNames, TensorType]:\n \"\"\"Evaluates the field at points along the ray.\n\n Args:\n ray_samples: Samples to evaluate field on.\n \"\"\"\n if compute_normals:\n with torch.enable_grad():\n density, density_embedding = self.get_density(ray_samples)\n else:\n density, density_embedding = self.get_density(ray_samples)\n\n field_outputs = self.get_outputs(ray_samples, density_embedding=density_embedding)\n field_outputs[FieldHeadNames.DENSITY] = density # type: ignore\n\n if compute_normals:\n with torch.enable_grad():\n normals = self.get_normals()\n field_outputs[FieldHeadNames.NORMALS] = normals # type: ignore\n return field_outputs" }, { "identifier": "ProposalNetworkSampler", "path": "nerfstudio/model_components/ray_samplers.py", "snippet": "class ProposalNetworkSampler(Sampler):\n \"\"\"Sampler that uses a proposal network to generate samples.\n\n Args:\n num_proposal_samples_per_ray: Number of samples to generate per ray for each proposal step.\n num_nerf_samples_per_ray: Number of samples to generate per ray for the NERF model.\n num_proposal_network_iterations: Number of proposal network iterations to run.\n single_jitter: Use a same random jitter for all samples along a ray.\n update_sched: A function that takes the iteration number of steps between updates.\n initial_sampler: Sampler to use for the first iteration. 
Uses UniformLinDispPiecewise if not set.\n \"\"\"\n\n def __init__(\n self,\n num_proposal_samples_per_ray: Tuple[int] = (64,),\n num_nerf_samples_per_ray: int = 32,\n num_proposal_network_iterations: int = 2,\n single_jitter: bool = False,\n update_sched: Callable = lambda x: 1,\n initial_sampler: Optional[Sampler] = None,\n ) -> None:\n super().__init__()\n self.num_proposal_samples_per_ray = num_proposal_samples_per_ray\n self.num_nerf_samples_per_ray = num_nerf_samples_per_ray\n self.num_proposal_network_iterations = num_proposal_network_iterations\n self.update_sched = update_sched\n if self.num_proposal_network_iterations < 1:\n raise ValueError(\"num_proposal_network_iterations must be >= 1\")\n\n # samplers\n if initial_sampler is None:\n self.initial_sampler = UniformLinDispPiecewiseSampler(single_jitter=single_jitter)\n else:\n self.initial_sampler = initial_sampler\n self.pdf_sampler = PDFSampler(include_original=False, single_jitter=single_jitter)\n\n self._anneal = 1.0\n self._steps_since_update = 0\n self._step = 0\n\n def set_anneal(self, anneal: float) -> None:\n \"\"\"Set the anneal value for the proposal network.\"\"\"\n self._anneal = anneal\n\n def step_cb(self, step):\n \"\"\"Callback to register a training step has passed. This is used to keep track of the sampling schedule\"\"\"\n self._step = step\n self._steps_since_update += 1\n\n def generate_ray_samples(\n self,\n ray_bundle: Optional[RayBundle] = None,\n density_fns: Optional[List[Callable]] = None,\n ) -> Tuple[RaySamples, List, List]:\n assert ray_bundle is not None\n assert density_fns is not None\n\n weights_list = []\n ray_samples_list = []\n\n n = self.num_proposal_network_iterations\n weights = None\n ray_samples = None\n updated = self._steps_since_update > self.update_sched(self._step) or self._step < 10\n for i_level in range(n + 1):\n is_prop = i_level < n\n num_samples = self.num_proposal_samples_per_ray[i_level] if is_prop else self.num_nerf_samples_per_ray\n if i_level == 0:\n # Uniform sampling because we need to start with some samples\n ray_samples = self.initial_sampler(ray_bundle, num_samples=num_samples)\n else:\n # PDF sampling based on the last samples and their weights\n # Perform annealing to the weights. This will be a no-op if self._anneal is 1.0.\n assert weights is not None\n annealed_weights = torch.pow(weights, self._anneal)\n ray_samples = self.pdf_sampler(ray_bundle, ray_samples, annealed_weights, num_samples=num_samples)\n if is_prop:\n if updated:\n # always update on the first step or the inf check in grad scaling crashes\n density = density_fns[i_level](ray_samples.frustums.get_positions())\n else:\n with torch.no_grad():\n density = density_fns[i_level](ray_samples.frustums.get_positions())\n weights = ray_samples.get_weights(density)\n weights_list.append(weights) # (num_rays, num_samples)\n ray_samples_list.append(ray_samples)\n if updated:\n self._steps_since_update = 0\n\n assert ray_samples is not None\n return ray_samples, weights_list, ray_samples_list" }, { "identifier": "UniformSampler", "path": "nerfstudio/model_components/ray_samplers.py", "snippet": "class UniformSampler(SpacedSampler):\n \"\"\"Sample uniformly along a ray\n\n Args:\n num_samples: Number of samples per ray\n train_stratified: Use stratified sampling during training. Defaults to True\n single_jitter: Use a same random jitter for all samples along a ray. 
Defaults to False\n \"\"\"\n\n def __init__(\n self,\n num_samples: Optional[int] = None,\n train_stratified=True,\n single_jitter=False,\n ) -> None:\n super().__init__(\n num_samples=num_samples,\n spacing_fn=lambda x: x,\n spacing_fn_inv=lambda x: x,\n train_stratified=train_stratified,\n single_jitter=single_jitter,\n )" }, { "identifier": "AccumulationRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class AccumulationRenderer(nn.Module):\n \"\"\"Accumulated value along a ray.\"\"\"\n\n @classmethod\n def forward(\n cls,\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 1]:\n \"\"\"Composite samples along ray and calculate accumulation.\n\n Args:\n weights: Weights for each sample\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of accumulated values.\n \"\"\"\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n accumulation = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n else:\n accumulation = torch.sum(weights, dim=-2)\n return accumulation" }, { "identifier": "DepthRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class DepthRenderer(nn.Module):\n \"\"\"Calculate depth along ray.\n\n Depth Method:\n - median: Depth is set to the distance where the accumulated weight reaches 0.5.\n - expected: Expected depth along ray. Same procedure as rendering rgb, but with depth.\n\n Args:\n method: Depth calculation method.\n \"\"\"\n\n def __init__(self, method: Literal[\"median\", \"expected\"] = \"median\") -> None:\n super().__init__()\n self.method = method\n\n def forward(\n self,\n weights: TensorType[..., \"num_samples\", 1],\n ray_samples: RaySamples,\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[..., 1]:\n \"\"\"Composite samples along ray and calculate depths.\n\n Args:\n weights: Weights for each sample.\n ray_samples: Set of ray samples.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of depth values.\n \"\"\"\n\n if self.method == \"median\":\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n raise NotImplementedError(\"Median depth calculation is not implemented for packed samples.\")\n cumulative_weights = torch.cumsum(weights[..., 0], dim=-1) # [..., num_samples]\n split = torch.ones((*weights.shape[:-2], 1), device=weights.device) * 0.5 # [..., 1]\n median_index = torch.searchsorted(cumulative_weights, split, side=\"left\") # [..., 1]\n median_index = torch.clamp(median_index, 0, steps.shape[-2] - 1) # [..., 1]\n median_depth = torch.gather(steps[..., 0], dim=-1, index=median_index) # [..., 1]\n return median_depth\n if self.method == \"expected\":\n eps = 1e-10\n steps = (ray_samples.frustums.starts + ray_samples.frustums.ends) / 2\n\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n depth = nerfacc.accumulate_along_rays(weights, ray_indices, steps, num_rays)\n accumulation = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n depth = depth / (accumulation + eps)\n else:\n 
depth = torch.sum(weights * steps, dim=-2) / (torch.sum(weights, -2) + eps)\n\n depth = torch.clip(depth, steps.min(), steps.max())\n\n return depth\n\n raise NotImplementedError(f\"Method {self.method} not implemented\")" }, { "identifier": "NormalsRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class NormalsRenderer(nn.Module):\n \"\"\"Calculate normals along the ray.\"\"\"\n\n @classmethod\n def forward(\n cls,\n normals: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n normalize: bool = True,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Calculate normals along the ray.\n\n Args:\n normals: Normals for each sample.\n weights: Weights of each sample.\n normalize: Normalize normals.\n \"\"\"\n n = torch.sum(weights * normals, dim=-2)\n if normalize:\n n = safe_normalize(n)\n return n" }, { "identifier": "RGBRenderer", "path": "nerfstudio/model_components/renderers.py", "snippet": "class RGBRenderer(nn.Module):\n \"\"\"Standard volumetric rendering.\n\n Args:\n background_color: Background color as RGB. Uses random colors if None.\n \"\"\"\n\n def __init__(self, background_color: Union[Literal[\"random\", \"last_sample\"], TensorType[3]] = \"random\") -> None:\n super().__init__()\n self.background_color = background_color\n\n @classmethod\n def combine_rgb(\n cls,\n rgb: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n background_color: Union[Literal[\"random\", \"white\", \"black\", \"last_sample\"], TensorType[3]] = \"random\",\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Composite samples along ray and render color image\n\n Args:\n rgb: RGB for each sample\n weights: Weights for each sample\n background_color: Background color as RGB.\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs rgb values.\n \"\"\"\n if ray_indices is not None and num_rays is not None:\n # Necessary for packed samples from volumetric ray sampler\n if background_color == \"last_sample\":\n raise NotImplementedError(\"Background color 'last_sample' not implemented for packed samples.\")\n comp_rgb = nerfacc.accumulate_along_rays(weights, ray_indices, rgb, num_rays)\n accumulated_weight = nerfacc.accumulate_along_rays(weights, ray_indices, None, num_rays)\n else:\n comp_rgb = torch.sum(weights * rgb, dim=-2)\n accumulated_weight = torch.sum(weights, dim=-2)\n\n if BACKGROUND_COLOR_OVERRIDE is not None:\n background_color = BACKGROUND_COLOR_OVERRIDE\n if background_color == \"last_sample\":\n background_color = rgb[..., -1, :]\n if background_color == \"random\":\n background_color = torch.rand_like(comp_rgb).to(rgb.device)\n if isinstance(background_color, str) and background_color in colors.COLORS_DICT:\n background_color = colors.COLORS_DICT[background_color].to(rgb.device)\n\n assert isinstance(background_color, torch.Tensor)\n comp_rgb = comp_rgb + background_color.to(weights.device) * (1.0 - accumulated_weight)\n\n return comp_rgb\n\n def forward(\n self,\n rgb: TensorType[\"bs\":..., \"num_samples\", 3],\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n ray_indices: Optional[TensorType[\"num_samples\"]] = None,\n num_rays: Optional[int] = None,\n ) -> TensorType[\"bs\":..., 3]:\n \"\"\"Composite samples along ray and render color image\n\n Args:\n rgb: RGB for each sample\n weights: 
Weights for each sample\n ray_indices: Ray index for each sample, used when samples are packed.\n num_rays: Number of rays, used when samples are packed.\n\n Returns:\n Outputs of rgb values.\n \"\"\"\n\n if not self.training:\n rgb = torch.nan_to_num(rgb)\n rgb = self.combine_rgb(\n rgb, weights, background_color=self.background_color, ray_indices=ray_indices, num_rays=num_rays\n )\n if not self.training:\n torch.clamp_(rgb, min=0.0, max=1.0)\n return rgb" }, { "identifier": "SceneContraction", "path": "nerfstudio/field_components/spatial_distortions.py", "snippet": "class SceneContraction(SpatialDistortion):\n \"\"\"Contract unbounded space using the contraction was proposed in MipNeRF-360.\n We use the following contraction equation:\n\n .. math::\n\n f(x) = \\\\begin{cases}\n x & ||x|| \\\\leq 1 \\\\\\\\\n (2 - \\\\frac{1}{||x||})(\\\\frac{x}{||x||}) & ||x|| > 1\n \\\\end{cases}\n\n If the order is not specified, we use the Frobenius norm, this will contract the space to a sphere of\n radius 1. If the order is L_inf (order=float(\"inf\")), we will contract the space to a cube of side length 2.\n If using voxel based encodings such as the Hash encoder, we recommend using the L_inf norm.\n\n Args:\n order: Order of the norm. Default to the Frobenius norm. Must be set to None for Gaussians.\n\n \"\"\"\n\n def __init__(self, order: Optional[Union[float, int]] = None) -> None:\n super().__init__()\n self.order = order\n\n def forward(self, positions):\n def contract(x):\n mag = torch.linalg.norm(x, ord=self.order, dim=-1)[..., None]\n return torch.where(mag < 1, x, (2 - (1 / mag)) * (x / mag))\n\n if isinstance(positions, Gaussians):\n means = contract(positions.mean.clone())\n\n contract = lambda x: (2 - (1 / torch.linalg.norm(x, ord=self.order, dim=-1, keepdim=True))) * (\n x / torch.linalg.norm(x, ord=self.order, dim=-1, keepdim=True)\n )\n jc_means = vmap(jacrev(contract))(positions.mean.view(-1, positions.mean.shape[-1]))\n jc_means = jc_means.view(list(positions.mean.shape) + [positions.mean.shape[-1]])\n\n # Only update covariances on positions outside the unit sphere\n mag = positions.mean.norm(dim=-1)\n mask = mag >= 1\n cov = positions.cov.clone()\n cov[mask] = jc_means[mask] @ positions.cov[mask] @ torch.transpose(jc_means[mask], -2, -1)\n\n return Gaussians(mean=means, cov=cov)\n\n return contract(positions)" }, { "identifier": "SpatialDistortion", "path": "nerfstudio/field_components/spatial_distortions.py", "snippet": "class SpatialDistortion(nn.Module):\n \"\"\"Apply spatial distortions\"\"\"\n\n def forward(\n self, positions: Union[TensorType[\"bs\":..., 3], Gaussians]\n ) -> Union[TensorType[\"bs\":..., 3], Gaussians]:\n \"\"\"\n Args:\n positions: Sample to distort\n\n Returns:\n Union: distorted sample\n \"\"\"" }, { "identifier": "MSELoss", "path": "nerfstudio/model_components/losses.py", "snippet": "LOSSES = {\"L1\": L1Loss, \"MSE\": MSELoss}\nEPS = 1.0e-7\nURF_SIGMA_SCALE_FACTOR = 3.0\n DS_NERF = 1\n URF = 2\nclass DepthLossType(Enum):\nclass MiDaSMSELoss(nn.Module):\nclass GradientLoss(nn.Module):\nclass ScaleAndShiftInvariantLoss(nn.Module):\ndef outer(\n t0_starts: TensorType[..., \"num_samples_0\"],\n t0_ends: TensorType[..., \"num_samples_0\"],\n t1_starts: TensorType[..., \"num_samples_1\"],\n t1_ends: TensorType[..., \"num_samples_1\"],\n y1: TensorType[..., \"num_samples_1\"],\n) -> TensorType[..., \"num_samples_0\"]:\ndef lossfun_outer(\n t: TensorType[..., \"num_samples+1\"],\n w: TensorType[..., \"num_samples\"],\n t_env: TensorType[..., 
\"num_samples+1\"],\n w_env: TensorType[..., \"num_samples\"],\n):\ndef ray_samples_to_sdist(ray_samples):\ndef interlevel_loss(weights_list, ray_samples_list):\ndef lossfun_distortion(t, w):\ndef distortion_loss(weights_list, ray_samples_list):\ndef nerfstudio_distortion_loss(\n ray_samples: RaySamples,\n densities: TensorType[\"bs\":..., \"num_samples\", 1] = None,\n weights: TensorType[\"bs\":..., \"num_samples\", 1] = None,\n) -> TensorType[\"bs\":..., 1]:\ndef orientation_loss(\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n normals: TensorType[\"bs\":..., \"num_samples\", 3],\n viewdirs: TensorType[\"bs\":..., 3],\n):\ndef pred_normal_loss(\n weights: TensorType[\"bs\":..., \"num_samples\", 1],\n normals: TensorType[\"bs\":..., \"num_samples\", 3],\n pred_normals: TensorType[\"bs\":..., \"num_samples\", 3],\n):\ndef ds_nerf_depth_loss(\n weights: TensorType[..., \"num_samples\", 1],\n termination_depth: TensorType[..., 1],\n steps: TensorType[..., \"num_samples\", 1],\n lengths: TensorType[..., \"num_samples\", 1],\n sigma: TensorType[0],\n) -> TensorType[..., 1]:\ndef urban_radiance_field_depth_loss(\n weights: TensorType[..., \"num_samples\", 1],\n termination_depth: TensorType[..., 1],\n predicted_depth: TensorType[..., 1],\n steps: TensorType[..., \"num_samples\", 1],\n sigma: TensorType[0],\n) -> TensorType[..., 1]:\ndef depth_loss(\n weights: TensorType[..., \"num_samples\", 1],\n ray_samples: RaySamples,\n termination_depth: TensorType[..., 1],\n predicted_depth: TensorType[..., 1],\n sigma: TensorType[0],\n directions_norm: TensorType[..., 1],\n is_euclidean: bool,\n depth_loss_type: DepthLossType,\n) -> TensorType[0]:\ndef monosdf_normal_loss(\n normal_pred: TensorType[\"num_samples\", 3], normal_gt: TensorType[\"num_samples\", 3]\n) -> TensorType[0]:\n def __init__(self, reduction_type: Literal[\"image\", \"batch\"] = \"batch\"):\n def forward(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def __init__(self, scales: int = 4, reduction_type: Literal[\"image\", \"batch\"] = \"batch\"):\n def forward(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def gradient_loss(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def __init__(self, alpha: float = 0.5, scales: int = 4, reduction_type: Literal[\"image\", \"batch\"] = \"batch\"):\n def compute_scale_and_shift(\n cls, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ):\n def forward(\n self, prediction: TensorType[1, 32, \"mult\"], target: TensorType[1, 32, \"mult\"], mask: TensorType[1, 32, \"mult\"]\n ) -> TensorType[0]:\n def __get_prediction_ssi(self):" }, { "identifier": "SceneContraction", "path": "nerfstudio/field_components/spatial_distortions.py", "snippet": "class SceneContraction(SpatialDistortion):\n \"\"\"Contract unbounded space using the contraction was proposed in MipNeRF-360.\n We use the following contraction equation:\n\n .. math::\n\n f(x) = \\\\begin{cases}\n x & ||x|| \\\\leq 1 \\\\\\\\\n (2 - \\\\frac{1}{||x||})(\\\\frac{x}{||x||}) & ||x|| > 1\n \\\\end{cases}\n\n If the order is not specified, we use the Frobenius norm, this will contract the space to a sphere of\n radius 1. 
If the order is L_inf (order=float(\"inf\")), we will contract the space to a cube of side length 2.\n If using voxel based encodings such as the Hash encoder, we recommend using the L_inf norm.\n\n Args:\n order: Order of the norm. Default to the Frobenius norm. Must be set to None for Gaussians.\n\n \"\"\"\n\n def __init__(self, order: Optional[Union[float, int]] = None) -> None:\n super().__init__()\n self.order = order\n\n def forward(self, positions):\n def contract(x):\n mag = torch.linalg.norm(x, ord=self.order, dim=-1)[..., None]\n return torch.where(mag < 1, x, (2 - (1 / mag)) * (x / mag))\n\n if isinstance(positions, Gaussians):\n means = contract(positions.mean.clone())\n\n contract = lambda x: (2 - (1 / torch.linalg.norm(x, ord=self.order, dim=-1, keepdim=True))) * (\n x / torch.linalg.norm(x, ord=self.order, dim=-1, keepdim=True)\n )\n jc_means = vmap(jacrev(contract))(positions.mean.view(-1, positions.mean.shape[-1]))\n jc_means = jc_means.view(list(positions.mean.shape) + [positions.mean.shape[-1]])\n\n # Only update covariances on positions outside the unit sphere\n mag = positions.mean.norm(dim=-1)\n mask = mag >= 1\n cov = positions.cov.clone()\n cov[mask] = jc_means[mask] @ positions.cov[mask] @ torch.transpose(jc_means[mask], -2, -1)\n\n return Gaussians(mean=means, cov=cov)\n\n return contract(positions)" }, { "identifier": "HashMLPDensityField", "path": "nerfstudio/fields/density_fields.py", "snippet": "class HashMLPDensityField(Field):\n \"\"\"A lightweight density field module.\n\n Args:\n aabb: parameters of scene aabb bounds\n num_layers: number of hidden layers\n hidden_dim: dimension of hidden layers\n spatial_distortion: spatial distortion module\n use_linear: whether to skip the MLP and use a single linear layer instead\n \"\"\"\n\n def __init__(\n self,\n aabb: TensorType,\n num_layers: int = 2,\n hidden_dim: int = 64,\n spatial_distortion: Optional[SpatialDistortion] = None,\n use_linear: bool = False,\n num_levels: int = 8,\n max_res: int = 1024,\n base_res: int = 16,\n log2_hashmap_size: int = 18,\n features_per_level: int = 2,\n ) -> None:\n super().__init__()\n self.register_buffer(\"aabb\", aabb)\n self.spatial_distortion = spatial_distortion\n self.use_linear = use_linear\n growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1))\n\n self.register_buffer(\"max_res\", torch.tensor(max_res))\n self.register_buffer(\"num_levels\", torch.tensor(num_levels))\n self.register_buffer(\"log2_hashmap_size\", torch.tensor(log2_hashmap_size))\n\n config = {\n \"encoding\": {\n \"otype\": \"HashGrid\",\n \"n_levels\": num_levels,\n \"n_features_per_level\": features_per_level,\n \"log2_hashmap_size\": log2_hashmap_size,\n \"base_resolution\": base_res,\n \"per_level_scale\": growth_factor,\n },\n \"network\": {\n \"otype\": \"FullyFusedMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"None\",\n \"n_neurons\": hidden_dim,\n \"n_hidden_layers\": num_layers - 1,\n },\n }\n\n if not self.use_linear:\n self.mlp_base = tcnn.NetworkWithInputEncoding(\n n_input_dims=3,\n n_output_dims=1,\n encoding_config=config[\"encoding\"],\n network_config=config[\"network\"],\n )\n else:\n self.encoding = tcnn.Encoding(n_input_dims=3, encoding_config=config[\"encoding\"])\n self.linear = torch.nn.Linear(self.encoding.n_output_dims, 1)\n\n def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType, None]:\n if self.spatial_distortion is not None:\n positions = self.spatial_distortion(ray_samples.frustums.get_positions())\n positions = 
(positions + 2.0) / 4.0\n else:\n positions = SceneBox.get_normalized_positions(ray_samples.frustums.get_positions(), self.aabb)\n # Make sure the tcnn gets inputs between 0 and 1.\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1)\n positions = positions * selector[..., None]\n positions_flat = positions.view(-1, 3)\n if not self.use_linear:\n density_before_activation = (\n self.mlp_base(positions_flat).view(*ray_samples.frustums.shape, -1).to(positions)\n )\n else:\n x = self.encoding(positions_flat).to(positions)\n density_before_activation = self.linear(x).view(*ray_samples.frustums.shape, -1)\n\n # Rectifying the density with an exponential is much more stable than a ReLU or\n # softplus, because it enables high post-activation (float32) density outputs\n # from smaller internal (float16) parameters.\n density = trunc_exp(density_before_activation)\n density = density * selector[..., None]\n return density, None\n\n def get_outputs(self, ray_samples: RaySamples, density_embedding: Optional[TensorType] = None) -> dict:\n return {}" }, { "identifier": "NearFarCollider", "path": "nerfstudio/model_components/scene_colliders.py", "snippet": "class NearFarCollider(SceneCollider):\n \"\"\"Sets the nears and fars with fixed values.\n\n Args:\n near_plane: distance to near plane\n far_plane: distance to far plane\n \"\"\"\n\n def __init__(self, near_plane: float, far_plane: float, **kwargs) -> None:\n self.near_plane = near_plane\n self.far_plane = far_plane\n super().__init__(**kwargs)\n\n def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:\n ones = torch.ones_like(ray_bundle.origins[..., 0:1])\n near_plane = self.near_plane if self.training else 0\n ray_bundle.nears = ones * near_plane\n ray_bundle.fars = ones * self.far_plane\n return ray_bundle" }, { "identifier": "NormalsShader", "path": "nerfstudio/model_components/shaders.py", "snippet": "class NormalsShader(nn.Module):\n \"\"\"Calculate shading for normals.\"\"\"\n\n @classmethod\n def forward(\n cls,\n normals: TensorType[\"bs\":..., 3],\n weights: Optional[TensorType[\"bs\":..., 1]] = None,\n ):\n \"\"\"Applies a rainbow colormap to the normals.\n\n Args:\n normals: Normalized 3D vectors.\n weights: Optional weights to scale to the normal colors. (Can be used for masking)\n\n Returns:\n Colored normals\n \"\"\"\n normals = (normals + 1) / 2\n if weights is not None:\n normals = normals * weights\n return normals" }, { "identifier": "Model", "path": "nerfstudio/models/base_model.py", "snippet": "class Model(nn.Module):\n \"\"\"Model class\n Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. 
This should be\n subclassed for custom NeRF model.\n\n Args:\n config: configuration for instantiating model\n scene_box: dataset scene box\n \"\"\"\n\n config: ModelConfig\n\n def __init__(\n self,\n config: ModelConfig,\n scene_box: SceneBox,\n num_train_data: int,\n **kwargs,\n ) -> None:\n super().__init__()\n self.config = config\n self.scene_box = scene_box\n self.render_aabb = None # the box that we want to render - should be a subset of scene_box\n self.num_train_data = num_train_data\n self.kwargs = kwargs\n self.collider = None\n\n self.populate_modules() # populate the modules\n self.callbacks = None\n # to keep track of which device the nn.Module is on\n self.device_indicator_param = nn.Parameter(torch.empty(0))\n\n @property\n def device(self):\n \"\"\"Returns the device that the model is on.\"\"\"\n return self.device_indicator_param.device\n\n def get_training_callbacks( # pylint:disable=no-self-use\n self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument\n ) -> List[TrainingCallback]:\n \"\"\"Returns a list of callbacks that run functions at the specified training iterations.\"\"\"\n return []\n\n def populate_modules(self):\n \"\"\"Set the necessary modules to get the network working.\"\"\"\n # default instantiates optional modules that are common among many networks\n # NOTE: call `super().populate_modules()` in subclasses\n\n if self.config.enable_collider:\n self.collider = NearFarCollider(\n near_plane=self.config.collider_params[\"near_plane\"], far_plane=self.config.collider_params[\"far_plane\"]\n )\n\n @abstractmethod\n def get_param_groups(self) -> Dict[str, List[Parameter]]:\n \"\"\"Obtain the parameter groups for the optimizers\n\n Returns:\n Mapping of different parameter groups\n \"\"\"\n\n @abstractmethod\n def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in a Ray Bundle and returns a dictionary of outputs.\n\n Args:\n ray_bundle: Input bundle of rays. This raybundle should have all the\n needed information to compute the outputs.\n\n Returns:\n Outputs of model. (ie. rendered colors)\n \"\"\"\n\n def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Run forward starting with a ray bundle. 
This outputs different things depending on the configuration\n of the model and whether or not the batch is provided (whether or not we are training basically)\n\n Args:\n ray_bundle: containing all the information needed to render that ray latents included\n \"\"\"\n\n if self.collider is not None:\n ray_bundle = self.collider(ray_bundle)\n\n return self.get_outputs(ray_bundle)\n\n def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:\n \"\"\"Compute and returns metrics.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n \"\"\"\n # pylint: disable=unused-argument\n # pylint: disable=no-self-use\n return {}\n\n @abstractmethod\n def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:\n \"\"\"Computes and returns the losses dict.\n\n Args:\n outputs: the output to compute loss dict to\n batch: ground truth batch corresponding to outputs\n metrics_dict: dictionary of metrics, some of which we can use for loss\n \"\"\"\n\n @torch.no_grad()\n def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:\n \"\"\"Takes in camera parameters and computes the output of the model.\n\n Args:\n camera_ray_bundle: ray bundle to calculate outputs over\n \"\"\"\n num_rays_per_chunk = self.config.eval_num_rays_per_chunk\n image_height, image_width = camera_ray_bundle.origins.shape[:2]\n num_rays = len(camera_ray_bundle)\n outputs_lists = defaultdict(list)\n with Timer(\"forwarding\"):\n _t1 = time.time()\n for i in range(0, num_rays, num_rays_per_chunk):\n start_idx = i\n end_idx = i + num_rays_per_chunk\n ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)\n outputs = self.forward(ray_bundle=ray_bundle)\n for output_name, output in outputs.items(): # type: ignore\n outputs_lists[output_name].append(output)\n print(f\"forwarding took {time.time() - _t1} seconds\")\n outputs = {}\n for output_name, outputs_list in outputs_lists.items():\n if not torch.is_tensor(outputs_list[0]):\n # TODO: handle lists of tensors as well\n continue\n if output_name == \"mask_val\":\n outputs[\"mask_val\"] = torch.cat(outputs_list, dim=0)\n outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1) # type: ignore\n return outputs\n\n @abstractmethod\n def get_image_metrics_and_images(\n self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]\n ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:\n \"\"\"Writes the test image outputs.\n TODO: This shouldn't return a loss\n\n Args:\n image_idx: Index of the image.\n step: Current step.\n batch: Batch of data.\n outputs: Outputs of the model.\n\n Returns:\n A dictionary of metrics.\n \"\"\"\n\n def load_model(self, loaded_state: Dict[str, Any]) -> None:\n \"\"\"Load the checkpoint from the given path\n\n Args:\n loaded_state: dictionary of pre-trained model states\n \"\"\"\n state = {key.replace(\"module.\", \"\"): value for key, value in loaded_state[\"model\"].items()}\n self.load_state_dict(state) # type: ignore\n\n def update_to_step(self, step: int) -> None:\n \"\"\"Called when loading a model from a checkpoint. 
Sets any model parameters that change over\n training to the correct value, based on the training step of the checkpoint.\n\n Args:\n step: training step of the loaded checkpoint\n \"\"\"" }, { "identifier": "ModelConfig", "path": "nerfstudio/models/base_model.py", "snippet": "class ModelConfig(InstantiateConfig):\n \"\"\"Configuration for model instantiation\"\"\"\n\n _target: Type = field(default_factory=lambda: Model)\n \"\"\"target class to instantiate\"\"\"\n enable_collider: bool = True\n \"\"\"Whether to create a scene collider to filter rays.\"\"\"\n collider_params: Optional[Dict[str, float]] = to_immutable_dict({\"near_plane\": 2.0, \"far_plane\": 6.0})\n \"\"\"parameters to instantiate scene collider with\"\"\"\n loss_coefficients: Dict[str, float] = to_immutable_dict({\"rgb_loss_coarse\": 1.0, \"rgb_loss_fine\": 1.0})\n \"\"\"coefficients for weighting each loss term\"\"\"\n eval_num_rays_per_chunk: int = 4096\n \"\"\"specifies number of rays per chunk during eval\"\"\"" }, { "identifier": "colormaps", "path": "nerfstudio/utils/colormaps.py", "snippet": "def apply_colormap(image: TensorType[\"bs\":..., 1], cmap=\"viridis\") -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_depth_colormap(\n depth: TensorType[\"bs\":..., 1],\n accumulation: Optional[TensorType[\"bs\":..., 1]] = None,\n near_plane: Optional[float] = None,\n far_plane: Optional[float] = None,\n cmap=\"turbo\",\n) -> TensorType[\"bs\":..., \"rgb\":3]:\ndef apply_boolean_colormap(\n image: TensorType[\"bs\":..., 1, bool],\n true_color: TensorType[\"bs\":..., \"rgb\":3] = colors.WHITE,\n false_color: TensorType[\"bs\":..., \"rgb\":3] = colors.BLACK,\n) -> TensorType[\"bs\":..., \"rgb\":3]:" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... 
are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self) -> int:\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indices.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projecting points along the ray direction. 
Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin.\n bin_ends: Distance from origin to end of bin.\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n )\n\n return ray_samples" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we 
only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"" }, { "identifier": "TrainingCallbackLocation", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackLocation(Enum):\n \"\"\"Enum for specifying where the training callback should be run.\"\"\"\n\n BEFORE_TRAIN_ITERATION = auto()\n AFTER_TRAIN_ITERATION = auto()" }, { "identifier": "ProposalNetworkSamplerSpatial", "path": "MSTH/SpaceTimeHashing/ray_samplers.py", "snippet": "class ProposalNetworkSamplerSpatial(ProposalNetworkSampler):\n def __init__(\n self,\n num_proposal_samples_per_ray: Tuple[int] = ...,\n num_nerf_samples_per_ray: int = 32,\n num_proposal_network_iterations: int = 2,\n single_jitter: bool = True,\n update_sched: Callable = ...,\n initial_sampler: Optional[Sampler] = None,\n middle_distance: int = 1,\n ) -> None:\n super(Sampler, self).__init__()\n self.num_proposal_samples_per_ray = num_proposal_samples_per_ray\n self.num_nerf_samples_per_ray = num_nerf_samples_per_ray\n self.num_proposal_network_iterations = num_proposal_network_iterations\n self.update_sched = update_sched\n if self.num_proposal_network_iterations < 1:\n raise ValueError(\"num_proposal_network_iterations must be >= 1\")\n\n # samplers\n if initial_sampler is None:\n self.initial_sampler = UniformLinDispPiecewiseSamplerSpatial(\n single_jitter=single_jitter, middle_distance=middle_distance\n )\n else:\n self.initial_sampler = initial_sampler\n self.pdf_sampler = PDFSamplerSpatial(include_original=False, single_jitter=single_jitter)\n\n self._anneal = 1.0\n self._steps_since_update = 0\n self._step = 0\n\n def generate_ray_samples(\n self,\n ray_bundle: Optional[RayBundle] = None,\n density_fns: Optional[List[Callable]] = None,\n ) -> Tuple[RaySamples, List, List, List]:\n with Timer(\"total_ps\"):\n with Timer(\"prep\"):\n assert ray_bundle is not None\n assert density_fns is not None\n\n weights_list = []\n # weights_list_static = []\n ray_samples_list = []\n\n n = self.num_proposal_network_iterations\n weights = None\n weights_static = None\n ray_samples = None\n updated = self._steps_since_update > self.update_sched(self._step) or self._step < 10\n # print(n)\n for i_level in range(n + 1):\n with Timer(\"level\"):\n is_prop = i_level < n\n num_samples = (\n self.num_proposal_samples_per_ray[i_level] if is_prop else self.num_nerf_samples_per_ray\n )\n # print(\"num_samples\", num_samples)\n if i_level == 0:\n # Uniform sampling because we need to start with some samples\n with Timer(\"initial\"):\n ray_samples = self.initial_sampler(ray_bundle, num_samples=num_samples)\n else:\n # PDF sampling based on the last samples and their weights\n # Perform annealing to the weights. 
This will be a no-op if self._anneal is 1.0.\n assert weights is not None\n with Timer(\"anneal\"):\n annealed_weights = torch.pow(weights, self._anneal)\n self.last_weight = annealed_weights\n with Timer(\"pdf\"):\n ray_samples = self.pdf_sampler(\n ray_bundle, ray_samples, annealed_weights, num_samples=num_samples\n )\n # print(\"ray_samples.shape\", ray_samples.shape)\n if is_prop:\n if updated:\n # always update on the first step or the inf check in grad scaling crashes\n # density = density_fns[i_level](ray_samples.frustums.get_positions())\n with Timer(\"updated density_fn query\"):\n density, density_static = density_fns[i_level](spacetime(ray_samples))\n # print(density.max())\n else:\n with Timer(\"no_updated density_fn query\"):\n with torch.no_grad():\n # density = density_fns[i_level](ray_samples.frustums.get_positions())\n density, density_static = density_fns[i_level](spacetime(ray_samples))\n # print(\"ray_samples.shape\", ray_samples.shape)\n # print(\"density.shape\", density.shape)\n with Timer(\"get weights\"):\n weights = ray_samples.get_weights(density)\n with Timer(\"append\"):\n weights_list.append(weights) # (num_rays, num_samples)\n # weights_static = ray_samples.get_weights(density_static)\n # weights_list_static.append(weights_static) # (num_rays, num_samples)\n ray_samples_list.append(ray_samples)\n if updated:\n self._steps_since_update = 0\n\n assert ray_samples is not None\n return ray_samples, weights_list, ray_samples_list # , weights_list_static" }, { "identifier": "spacetime", "path": "MSTH/SpaceTimeHashing/ray_samplers.py", "snippet": "def spacetime(ray_samples: RaySamples):\n positions = ray_samples.frustums.get_positions()\n assert ray_samples.times is not None, \"ray samples should contain time information\"\n times = ray_samples.times.unsqueeze(1).repeat(1, positions.size(1), 1).to(positions)\n # [num_rays, num_samples, 4]\n return torch.cat([positions, times], dim=-1)" }, { "identifier": "spacetime_concat", "path": "MSTH/SpaceTimeHashing/ray_samplers.py", "snippet": "def spacetime_concat(positions, times):\n # print(positions.shape)\n # print(times.shape)\n if times.dim() < positions.dim():\n times = times.unsqueeze(1).repeat(1, positions.size(1), 1)\n return torch.cat([positions, times.to(positions)], dim=-1)" } ]
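The SceneContraction snippets quoted above implement the MipNeRF-360 contraction: points with norm at most 1 pass through unchanged, while points outside are squashed onto the shell between radius 1 and 2, so the whole unbounded scene lands in a ball of radius 2. A minimal standalone sketch of that mapping (plain torch only; the Gaussian/covariance branch of the snippet is omitted):

import torch

def contract(x: torch.Tensor, order=None) -> torch.Tensor:
    # order=None gives the L2/Frobenius norm; order=float("inf") gives the
    # cube-shaped L_inf variant recommended for hash-grid encodings
    mag = torch.linalg.norm(x, ord=order, dim=-1, keepdim=True)
    return torch.where(mag < 1, x, (2 - 1 / mag) * (x / mag))

pts = torch.tensor([[0.5, 0.0, 0.0], [10.0, 0.0, 0.0], [100.0, -50.0, 25.0]])
print(contract(pts).norm(dim=-1))  # tensor([0.5000, 1.9000, 1.9913]) -- always < 2

This radius-2 output range is what the (positions + 2.0) / 4.0 rescaling in the HashMLPDensityField snippet relies on to land contracted samples inside the unit cube expected by tcnn.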
import torch
import numpy as np
import tinycudann as tcnn
from typing import *
from nerfacc import ContractionType, contract
from torch.nn.parameter import Parameter
from torchtyping import TensorType
from dataclasses import dataclass, field
from nerfstudio.cameras.rays import RaySamples, Frustums, RayBundle
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.field_components.activations import trunc_exp
from nerfstudio.field_components.embedding import Embedding
from nerfstudio.field_components.field_heads import FieldHeadNames
from nerfstudio.fields.base_field import Field
from nerfstudio.model_components.ray_samplers import (
    ProposalNetworkSampler,
    UniformSampler,
)
from nerfstudio.model_components.renderers import (
    AccumulationRenderer,
    DepthRenderer,
    NormalsRenderer,
    RGBRenderer,
)
from nerfstudio.field_components.spatial_distortions import (
    SceneContraction,
    SpatialDistortion,
)
from nerfstudio.model_components.losses import (
    MSELoss,
    distortion_loss,
    interlevel_loss,
    orientation_loss,
    pred_normal_loss,
)
from torchmetrics import PeakSignalNoiseRatio
from torchmetrics.functional import structural_similarity_index_measure
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from nerfstudio.fields.density_fields import HashMLPDensityField
from nerfstudio.model_components.scene_colliders import NearFarCollider
from nerfstudio.model_components.shaders import NormalsShader
from nerfstudio.models.base_model import Model, ModelConfig
from nerfstudio.utils import colormaps
from nerfstudio.engine.callbacks import (
    TrainingCallback,
    TrainingCallbackAttributes,
    TrainingCallbackLocation,
)
from MSTH.SpaceTimeHashing.ray_samplers import ProposalNetworkSamplerSpatial, spacetime, spacetime_concat
from rich.console import Console
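The hash-grid fields in this record (HashMLPDensityField in the context above and the class defined below) derive the tcnn per_level_scale the same way: a geometric progression of grid resolutions running from base_res up to max_res over num_levels levels. A small numeric sketch with the defaults used in the snippets (num_levels=8, base_res=16, max_res=1024):

import numpy as np

num_levels, base_res, max_res = 8, 16, 1024
growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1))
# growth_factor == (max_res / base_res) ** (1 / (num_levels - 1)) ~= 1.8114
resolutions = [round(base_res * growth_factor**lvl) for lvl in range(num_levels)]
print(resolutions)  # approximately [16, 29, 52, 95, 172, 312, 565, 1024]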
17600
CONSOLE = Console(width=120) class SpaceTimeDensityFieldWithPermutation(Field): def __init__( self, aabb: TensorType, num_layers: int = 2, hidden_dim: int = 64, spatial_distortion: Optional[SpatialDistortion] = None, use_linear: bool = False, num_levels: int = 8, max_res: int = 1024, base_res: int = 16, log2_hashmap_size: int = 18, features_per_level: int = 2, ) -> None: super().__init__() self.register_buffer("aabb", aabb) self.spatial_distortion = spatial_distortion self.use_linear = use_linear growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1)) self.register_buffer("max_res", torch.tensor(max_res)) self.register_buffer("num_levels", torch.tensor(num_levels)) self.register_buffer("log2_hashmap_size", torch.tensor(log2_hashmap_size)) config = { "encoding": { "otype": "HashGrid", "n_levels": num_levels, "n_features_per_level": features_per_level, "log2_hashmap_size": log2_hashmap_size, "base_resolution": base_res, "per_level_scale": growth_factor, }, "network": { "otype": "FullyFusedMLP", "activation": "ReLU", "output_activation": "None", "n_neurons": hidden_dim, "n_hidden_layers": num_layers - 1, }, } if not self.use_linear: self.mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.spatial_mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=3, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xyzt = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xtyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xytz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.txyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) else: self.encoding = tcnn.Encoding(n_input_dims=4, encoding_config=config["encoding"]) self.linear = torch.nn.Linear(self.encoding.n_output_dims, 1)
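The class above instantiates four independent 4-D hash encoders named xyzt, xtyz, xytz and txyz alongside mlp_base. One plausible reading, stated here only as an assumption since get_density is not part of this crop, is that each branch consumes the same (x, y, z, t) input with its columns reordered to match the name. A pure-torch sketch of building those permuted views:

import torch

spacetime_batch = torch.rand(5, 4)  # columns assumed to be (x, y, z, t)

# hypothetical column orders inferred from the attribute names
PERMS = {
    "xyzt": [0, 1, 2, 3],
    "xtyz": [0, 3, 1, 2],
    "xytz": [0, 1, 3, 2],
    "txyz": [3, 0, 1, 2],
}
views = {name: spacetime_batch[:, cols] for name, cols in PERMS.items()}
for name, view in views.items():
    print(name, view.shape)  # each (5, 4) view would feed one 4-D encoder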
CONSOLE = Console(width=120) class SpaceTimeDensityFieldWithPermutation(Field): def __init__( self, aabb: TensorType, num_layers: int = 2, hidden_dim: int = 64, spatial_distortion: Optional[SpatialDistortion] = None, use_linear: bool = False, num_levels: int = 8, max_res: int = 1024, base_res: int = 16, log2_hashmap_size: int = 18, features_per_level: int = 2, ) -> None: super().__init__() self.register_buffer("aabb", aabb) self.spatial_distortion = spatial_distortion self.use_linear = use_linear growth_factor = np.exp((np.log(max_res) - np.log(base_res)) / (num_levels - 1)) self.register_buffer("max_res", torch.tensor(max_res)) self.register_buffer("num_levels", torch.tensor(num_levels)) self.register_buffer("log2_hashmap_size", torch.tensor(log2_hashmap_size)) config = { "encoding": { "otype": "HashGrid", "n_levels": num_levels, "n_features_per_level": features_per_level, "log2_hashmap_size": log2_hashmap_size, "base_resolution": base_res, "per_level_scale": growth_factor, }, "network": { "otype": "FullyFusedMLP", "activation": "ReLU", "output_activation": "None", "n_neurons": hidden_dim, "n_hidden_layers": num_layers - 1, }, } if not self.use_linear: self.mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.spatial_mlp_base = tcnn.NetworkWithInputEncoding( n_input_dims=3, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xyzt = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xtyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.xytz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) self.txyz = tcnn.NetworkWithInputEncoding( n_input_dims=4, n_output_dims=1, encoding_config=config["encoding"], network_config=config["network"], ) else: self.encoding = tcnn.Encoding(n_input_dims=4, encoding_config=config["encoding"]) self.linear = torch.nn.Linear(self.encoding.n_output_dims, 1)
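Whatever the missing get_density body does with those encoders, the sibling HashMLPDensityField snippet in the context fixes the surrounding pattern: contracted positions are shifted into [0, 1] for tcnn, samples falling outside that range are masked out, and the raw network output passes through a truncated exponential. A hedged sketch of that pattern, with a stand-in for nerfstudio's trunc_exp (the real one is a custom autograd op that keeps the backward pass stable):

import torch

def trunc_exp_like(x: torch.Tensor, cap: float = 15.0) -> torch.Tensor:
    # stand-in activation: exponential with a clamped input so large raw
    # outputs cannot overflow, mimicking trunc_exp's stability goal
    return torch.exp(torch.clamp(x, max=cap))

positions = torch.randn(4, 3) * 2    # pretend contracted coords
positions = (positions + 2.0) / 4.0  # map the radius-2 ball into [0, 1]
selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1)
raw = torch.randn(4, 1)              # stand-in for the MLP output
density = trunc_exp_like(raw) * selector[..., None]
print(density)                       # out-of-range samples come out as 0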
def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType, None]:
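The line above opens get_density for the space-time field; the 4-D input such a method would presumably consume is what the spacetime_concat helper quoted in this record's context builds, broadcasting one timestamp per ray across all of that ray's samples. A tiny numeric check of that broadcasting:

import torch

def spacetime_concat(positions, times):
    # verbatim logic from the MSTH ray_samplers snippet above
    if times.dim() < positions.dim():
        times = times.unsqueeze(1).repeat(1, positions.size(1), 1)
    return torch.cat([positions, times.to(positions)], dim=-1)

positions = torch.rand(2, 5, 3)  # (num_rays, num_samples, 3)
times = torch.rand(2, 1)         # one timestamp per ray
print(spacetime_concat(positions, times).shape)  # torch.Size([2, 5, 4])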
0
2023-10-26 04:39:15+00:00
24k
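The renderer snippets in the record above all reduce per-sample quantities with the same weight-sum along the sample axis. A compact numeric sketch of the two rules shown there: color compositing with background blending (RGBRenderer.combine_rgb) and the weight-averaged, clipped depth at the top of the context:

import torch

weights = torch.tensor([[[0.1], [0.3], [0.2]]])  # (1 ray, 3 samples, 1)
rgb = torch.rand(1, 3, 3)                        # per-sample colors
steps = torch.tensor([[[1.0], [2.0], [3.0]]])    # per-sample distances
background = torch.ones(3)                       # assume a white background

acc = weights.sum(dim=-2)  # accumulated opacity, here 0.6
comp_rgb = (weights * rgb).sum(dim=-2) + background * (1.0 - acc)
depth = (weights * steps).sum(dim=-2) / (weights.sum(dim=-2) + 1e-10)
depth = depth.clip(steps.min(), steps.max())
print(comp_rgb, depth)  # depth is (0.1*1 + 0.3*2 + 0.2*3) / 0.6 = 2.1667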
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
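The DiffSchedule snippet later in this record derives the whole diffusion process from a single gamma(t): sigma = sqrt(sigmoid(gamma)) and alpha = sqrt(sigmoid(-gamma)). Since sigmoid(g) + sigmoid(-g) = 1, the variance-preserving identity alpha^2 + sigma^2 = 1 holds by construction, and the SNR alpha^2 / sigma^2 collapses to exp(-gamma), exactly as the snippet's SNR method computes it. A quick numeric check of both identities:

import torch

gamma = torch.linspace(-10.0, 10.0, 5)
sigma2 = torch.sigmoid(gamma)    # noise variance
alpha2 = torch.sigmoid(-gamma)   # signal variance
print(alpha2 + sigma2)                                     # all ones
print(torch.allclose(alpha2 / sigma2, torch.exp(-gamma)))  # True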
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 1\n self.device = torch.device(device)\n\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = n_samples\n\n self.data = {}\n self.process_molecules(\"raw_dataset\", n_samples, idx=0)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "ProcessedDoubleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedDoubleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=1,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 2\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 2)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1sthalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_N_idx_1sthalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag1_N_idx_2ndhalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_O_idx_2ndhalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1sthalf],\n self.hasN_set[key][frag1_N_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1sthalf],\n self.hasO_set[key][frag2_O_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTripleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedTripleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n 
self.process_molecules(\"frag3_data\", n_samples, idx=2)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n n1 = int(self.n_samples / 3)\n n2 = int(self.n_samples / 3)\n n3 = self.n_samples - n1 - n2\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag2_N_idx_1_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag3_F_idx_1_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag1_F_idx_2_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag2_O_idx_2_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag3_N_idx_2_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag1_N_idx_3_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag2_F_idx_3_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag3_O_idx_3_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n3,\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1_3],\n self.hasF_set[key][frag1_F_idx_2_3],\n self.hasN_set[key][frag1_N_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1_3],\n self.hasO_set[key][frag2_O_idx_2_3],\n self.hasF_set[key][frag2_F_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag3_data = {\n key: np.concatenate(\n [\n self.hasF_set[key][frag3_F_idx_1_3],\n self.hasN_set[key][frag3_N_idx_2_3],\n self.hasO_set[key][frag3_O_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTS1x", "path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for 
v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "EGNNDynamics", "path": "oa_reactdiff/dynamics/egnn_dynamics.py", "snippet": "class EGNNDynamics(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ) -> None:\n r\"\"\"Base dynamics class set up for denoising process.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. 
Defaults to None.\n \"\"\"\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n def forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tuple[List[Tensor], Tensor]:\n r\"\"\"predict noise /mu.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tuple[List[Tensor], Tensor]: updated pos-h and edge attributes\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n h_final, pos_final, edge_attr_final = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n )\n vel = pos_final - pos\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in pos, resetting EGNN output to randn.\")\n vel = torch.randn_like(vel)\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in h, resetting EGNN output to randn.\")\n h_final = torch.randn_like(h_final)\n\n h_final = h_final[:, :-condition_dim]\n\n frag_index = self.compute_frag_index(n_frag_switch)\n xh_final = [\n torch.cat(\n [\n self.remove_mean_batch(\n vel[frag_index[ii] : frag_index[ii + 1]],\n combined_mask[frag_index[ii] : frag_index[ii + 1]],\n ),\n self.decoders[ii](h_final[frag_index[ii] : frag_index[ii + 1]]),\n ],\n dim=-1,\n )\n for ii, name in enumerate(self.fragment_names)\n ]\n\n # xh_final = self.enpose_pbc(xh_final)\n\n if edge_attr_final is None or edge_attr_final.size(1) <= max(1, self.dist_dim):\n edge_attr_final = None\n else:\n edge_attr_final = self.edge_decoder(edge_attr_final)\n return xh_final, edge_attr_final\n\n @staticmethod\n def enpose_pbc(xh: List[Tensor], magnitude=10.0) -> List[Tensor]:\n xrange = magnitude * 2\n xh = [torch.remainder(_xh + magnitude, xrange) - magnitude for _xh in xh]\n return xh\n\n @staticmethod\n def compute_frag_index(n_frag_switch: 
Tensor) -> np.ndarray:\n counts = [\n torch.where(n_frag_switch == ii)[0].numel()\n for ii in torch.unique(n_frag_switch)\n ]\n return np.concatenate([np.array([0]), np.cumsum(counts)])\n\n @torch.no_grad()\n def adjust_edge_attr_on_new_eij(\n self,\n edge_index: Tensor,\n edge_attr: Tensor,\n edge_index_new: Tensor,\n ) -> Tensor:\n r\"\"\"Prepare new edge attributes (e_ij) given old {ij, e_ij} and new {ij}\n\n Args:\n edge_index (Tensor): ij\n edge_attr (Tensor): e_ij\n edge_index_new (Tensor): new ij\n\n Raises:\n ValueError: finding multiple entries for the same ij pair\n\n Returns:\n Tensor: new e_ij\n \"\"\"\n edge_index_T = torch.transpose(edge_index, 1, 0)\n edge_index_new_T = torch.transpose(edge_index_new, 1, 0)\n\n edge_attr_new = []\n for _ind, ij in enumerate(edge_index_new_T):\n ind = torch.where((ij == edge_index_T).all(dim=1))[0]\n if ind.size(0) > 1:\n raise ValueError(f\"ind should have 0 or 1 entries, getting {ind}\")\n\n if ind.size(0) == 0:\n self.create_new_edge_attr(\n ind_new=_ind,\n ij_new=ij,\n edge_index_new_T=edge_index_new_T,\n edge_attr_new=edge_attr_new,\n edge_attr=edge_attr,\n )\n else:\n edge_attr_new.append(edge_attr[ind.item()].detach())\n return torch.stack(edge_attr_new, dim=0)\n\n @staticmethod\n def init_edge_attr(sample_edge_attr):\n r\"\"\"initialize edge attributes.\"\"\"\n return torch.rand_like(sample_edge_attr)\n\n def create_new_edge_attr(\n self,\n ind_new: Tensor,\n ij_new: Tensor,\n edge_index_new_T: Tensor,\n edge_attr_new: List[Tensor],\n edge_attr: Tensor,\n ) -> List[Tensor]:\n r\"\"\"Create a new edge attribute for an ij that is not present in the old connections\n\n Args:\n ind_new (Tensor): natural index of new ij\n ij_new (Tensor): new ij\n edge_index_new_T (Tensor): new edge indexes, [n_edge, 2]\n edge_attr_new (List[Tensor]): list of new edge attributes\n edge_attr (Tensor): old edge attributes\n\n Raises:\n ValueError: no ji found for ij in new indexes\n\n Returns:\n List[Tensor]: list of new edge attributes\n \"\"\"\n ij_new_reverse = ij_new[torch.tensor([1, 0])]\n ind_new_reverse = torch.where((ij_new_reverse == edge_index_new_T).all(dim=1))[\n 0\n ]\n # print(ind_new_reverse)\n if ind_new_reverse.size(0) == 0:\n raise ValueError(\"should always find a reverse ind.\")\n # print(ij_new, ind_new, ind_new_reverse)\n if ind_new_reverse.item() >= ind_new:\n edge_attr_new.append(self.init_edge_attr(edge_attr[0]))\n else:\n edge_attr_new.append(edge_attr_new[ind_new_reverse.item()])\n return edge_attr_new\n\n @staticmethod\n def remove_mean_batch(x, indices):\n mean = scatter_mean(x, indices, dim=0)\n x = x - mean[indices]\n return x" }, { "identifier": "Confidence", "path": "oa_reactdiff/dynamics/confidence.py", "snippet": "class Confidence(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n **kwargs,\n ) -> None:\n r\"\"\"Confidence score for generated samples.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributes.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position 
vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n model_config.update({\"for_conf\": True})\n update_pocket_coords = True\n condition_time = (True,)\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n hidden_channels = model_config[\"hidden_channels\"]\n self.readout = GatedMLP(\n in_dim=hidden_channels,\n out_dims=[hidden_channels, hidden_channels, 1],\n activation=\"swish\",\n bias=True,\n last_layer_no_activation=True,\n )\n\n def _forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tensor:\n r\"\"\"predict confidence.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tensor: binary probability of confidence fo each graph.\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n node_features = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n ) # (n_node, n_hidden)\n\n graph_features = scatter_mean(\n node_features,\n index=combined_mask,\n dim=0,\n ) # (n_system, n_hidden)\n conf = self.readout(graph_features)\n return conf.squeeze()\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n ):\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n xh = [\n torch.cat(\n 
[repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n pred = self._forward(\n xh=xh,\n edge_index=edge_index,\n t=torch.tensor([0]),\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None,\n )\n return pred" }, { "identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "Normalizer", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "class Normalizer(nn.Module):\n def __init__(\n self,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n pos_dim: int = 3,\n ) -> None:\n super().__init__()\n self.norm_values = norm_values\n self.norm_biases = norm_biases\n self.pos_dim = pos_dim\n\n def normalize(self, representations: List[Dict]) -> List[Dict]:\n for ii in range(len(representations)):\n for jj, feature_type in enumerate(FEATURE_MAPPING):\n representations[ii][feature_type] = (\n representations[ii][feature_type] - self.norm_biases[jj]\n ) / self.norm_values[jj]\n return representations\n\n def unnormalize(self, x: Tensor, ind: int) -> Tensor:\n return x * self.norm_values[ind] + self.norm_biases[ind]\n\n def unnormalize_z(self, z_combined: List[Tensor]) -> List[Tensor]:\n for ii in range(len(z_combined)):\n z_combined[ii][:, : self.pos_dim] = self.unnormalize(\n z_combined[ii][:, : self.pos_dim], 0\n )\n z_combined[ii][:, self.pos_dim : -1] = self.unnormalize(\n z_combined[ii][:, self.pos_dim : -1], 1\n )\n z_combined[ii][:, -1:] = self.unnormalize(z_combined[ii][:, -1:], 2)\n return z_combined" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "EnVariationalDiffusion", "path": "oa_reactdiff/diffusion/en_diffusion.py", "snippet": "class EnVariationalDiffusion(nn.Module):\n \"\"\"\n The E(n) Diffusion Module.\n \"\"\"\n\n def __init__(\n self,\n dynamics: EGNNDynamics,\n schdule: DiffSchedule,\n normalizer: Normalizer,\n size_histogram: Optional[Dict] = None,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n fixed_idx: Optional[List] = None,\n ):\n super().__init__()\n assert loss_type in {\"vlb\", \"l2\"}\n\n self.dynamics = dynamics\n self.schedule = schdule\n self.normalizer = normalizer\n self.size_histogram = size_histogram\n self.loss_type = loss_type\n self.pos_only = pos_only\n self.fixed_idx = fixed_idx or []\n\n self.pos_dim = 
dynamics.pos_dim\n self.node_nfs = dynamics.node_nfs\n self.fragment_names = dynamics.fragment_names\n self.T = schdule.gamma_module.timesteps\n self.norm_values = normalizer.norm_values\n self.norm_biases = normalizer.norm_biases\n\n # ------ FORWARD PASS ------\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n return_pred: bool = False,\n ):\n r\"\"\"\n Computes the loss and NLL terms.\n\n #TODO: edge_attr not considered at all\n \"\"\"\n num_sample = representations[0][\"size\"].size(0)\n n_nodes = torch.stack(\n [repr[\"size\"] for repr in representations],\n dim=0,\n ).sum(dim=0)\n device = representations[0][\"pos\"].device\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n # Normalize data, take into account volume change in x.\n representations = self.normalizer.normalize(representations)\n\n # Likelihood change due to normalization\n delta_log_px = self.delta_log_px(n_nodes.sum())\n\n # Sample a timestep t for each example in batch\n # At evaluation time, loss_0 will be computed separately to decrease\n # variance in the estimator (costs two forward passes)\n lowest_t = 0 if self.training else 1\n t_int = torch.randint(\n lowest_t, self.T + 1, size=(num_sample, 1), device=device\n ).float()\n s_int = t_int - 1 # previous timestep\n\n # Masks: important to compute log p(x | z0).\n t_is_zero = (t_int == 0).float()\n t_is_not_zero = 1 - t_is_zero\n\n # Normalize t to [0, 1]. Note that the negative\n # step of s will never be used, since then p(x | z0) is computed.\n s = s_int / self.T\n t = t_int / self.T\n\n # Compute gamma_s and gamma_t via the network.\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s), representations[0][\"pos\"]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t), representations[0][\"pos\"]\n )\n\n # Concatenate x, and h[categorical].\n xh = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n # Find noised representation\n z_t, eps_xh = self.noised_representation(xh, masks, gamma_t)\n\n # Neural net prediction.\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z_t,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n if return_pred:\n return eps_xh, net_eps_xh\n\n # TODO: LJ term not implemented\n # xh_lig_hat = self.xh_given_zt_and_epsilon(z_t_lig, net_out_lig, gamma_t,\n # ligand['mask'])\n if self.pos_only:\n for ii in range(len(masks)):\n net_eps_xh[ii][:, self.pos_dim :] = torch.zeros_like(\n net_eps_xh[ii][:, self.pos_dim :],\n device=device,\n )\n # Compute the L2 error.\n error_t: List[Tensor] = [\n utils.sum_except_batch(\n (eps_xh[ii] - net_eps_xh[ii]) ** 2,\n masks[ii],\n dim_size=num_sample,\n )\n for ii in range(len(masks))\n ] # TODO: no edge_attr contribution\n\n # Compute weighting with SNR: (1 - SNR(s-t)) for epsilon parametrization\n SNR_weight = (1 - self.schedule.SNR(gamma_s - gamma_t)).squeeze(1)\n assert error_t[0].size() == SNR_weight.size()\n\n # The _constants_ depending on sigma_0 from the\n # cross entropy term E_q(z0 | x) [log p(x | z0)].\n neg_log_constants = -self.log_constants_p_x_given_z0(\n n_nodes=n_nodes, 
device=device\n )\n\n # The KL between q(zT | x) and p(zT) = Normal(0, 1).\n # Should be close to zero.\n # kl_prior = self.kl_prior_with_pocket(\n # xh_lig, xh_pocket, ligand['mask'], pocket['mask'],\n # ligand['size'] + pocket['size'])\n # TODO: approximate KL prior with zero now, which should not influence training.\n kl_prior = torch.zeros_like(neg_log_constants)\n\n if self.training:\n # Computes the L_0 term (even if gamma_t is not actually gamma_0)\n # and this will later be selected via masking.\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_t,\n eps_xh=eps_xh,\n net_eps_xh=net_eps_xh,\n gamma_t=gamma_t,\n epsilon=1e-10,\n )\n loss_0_x = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[0]\n ]\n loss_0_cat = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[1]\n ]\n loss_0_charge = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n # apply t_is_zero mask\n error_t = [_error_t * t_is_not_zero.squeeze() for _error_t in error_t]\n\n else:\n # Compute noise values for t = 0.\n t_zeros = torch.zeros_like(s)\n gamma_0 = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_zeros), representations[0][\"pos\"]\n )\n\n # Sample z_0 given x, h for timestep t, from q(z_t | x, h)\n z_0, eps_0_xh = self.noised_representation(xh, masks, gamma_0)\n net_eps_0_xh, net_eps_0_edge_attr = self.dynamics(\n xh=z_0,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_0,\n eps_xh=eps_0_xh,\n net_eps_xh=net_eps_0_xh,\n gamma_t=gamma_0,\n epsilon=1e-10,\n )\n loss_0_x = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[0]]\n loss_0_cat = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[1]]\n loss_0_charge = [\n -_log_p_fragment for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n loss_terms = {\n \"delta_log_px\": delta_log_px,\n \"error_t\": error_t,\n \"SNR_weight\": SNR_weight,\n \"loss_0_x\": loss_0_x,\n \"loss_0_cat\": loss_0_cat,\n \"loss_0_charge\": loss_0_charge,\n \"neg_log_constants\": neg_log_constants,\n \"kl_prior\": kl_prior,\n \"log_pN\": torch.zeros_like(kl_prior),\n \"t_int\": t_int.squeeze(),\n \"net_eps_xh\": net_eps_xh,\n \"eps_xh\": eps_xh,\n }\n return loss_terms\n\n def delta_log_px(self, num_nodes):\n return -self.subspace_dimensionality(num_nodes) * np.log(self.norm_values[0])\n\n def subspace_dimensionality(self, input_size):\n r\"\"\"\n Compute the dimensionality on translation-invariant linear subspace\n where distributions on x are defined.\n \"\"\"\n return (input_size - 1) * self.pos_dim\n\n def noised_representation(\n self,\n xh: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n ) -> Tuple[List[Tensor], List[Tensor]]:\n # Compute alpha_t and sigma_t from gamma.\n alpha_t = self.schedule.alpha(gamma_t, xh[0])\n sigma_t = self.schedule.sigma(gamma_t, xh[0])\n\n # Sample zt ~ Normal(alpha_t x, sigma_t)\n eps_xh = self.sample_combined_position_feature_noise(masks)\n\n # Sample z_t given x, h for timestep t, from q(z_t | x, h)\n z_t = [\n alpha_t[masks[ii]] * xh[ii] + sigma_t[masks[ii]] * eps_xh[ii]\n for ii in range(len(masks))\n ]\n\n return z_t, eps_xh\n\n def sample_combined_position_feature_noise(\n self,\n masks: List[Tensor],\n ) -> 
List[Tensor]:\n r\"\"\"\n Samples mean-centered normal noise for z_x, and standard normal noise for z_h.\n Note that we only need to put the center of gravity of *each fragment* to the origin.\n \"\"\"\n eps_xh = []\n for ii, mask in enumerate(masks):\n _eps_x = utils.sample_center_gravity_zero_gaussian_batch(\n size=(len(mask), self.pos_dim),\n indices=[mask],\n )\n _eps_h = utils.sample_gaussian(\n size=(len(mask), self.node_nfs[ii] - self.pos_dim),\n device=mask.device,\n )\n if self.pos_only:\n _eps_h = torch.zeros_like(_eps_h, device=mask.device)\n eps_xh.append(torch.cat([_eps_x, _eps_h], dim=1))\n for idx in self.fixed_idx:\n eps_xh[idx] = torch.zeros_like(eps_xh[idx], device=mask.device)\n return eps_xh\n\n def log_constants_p_x_given_z0(self, n_nodes, device):\n r\"\"\"Computes p(x|z0).\"\"\"\n\n batch_size = len(n_nodes)\n degrees_of_freedom_x = self.subspace_dimensionality(n_nodes).to(device)\n\n zeros = torch.zeros((batch_size, 1), device=device)\n gamma_0 = self.schedule.gamma_module(zeros)\n\n # Recall that sigma_x = sqrt(sigma_0^2 / alpha_0^2) = SNR(-0.5 gamma_0).\n log_sigma_x = 0.5 * gamma_0.view(batch_size)\n return degrees_of_freedom_x * (-log_sigma_x - 0.5 * np.log(2 * np.pi))\n\n def kl_prior(self):\n return NotImplementedError\n\n @staticmethod\n def gaussian_KL(q_mu_minus_p_mu_squared, q_sigma, p_sigma, d):\n \"\"\"Computes the KL distance between two normal distributions.\n Args:\n q_mu_minus_p_mu_squared: Squared difference between mean of\n distribution q and distribution p: ||mu_q - mu_p||^2\n q_sigma: Standard deviation of distribution q.\n p_sigma: Standard deviation of distribution p.\n d: dimension\n Returns:\n The KL distance\n \"\"\"\n return (\n d * torch.log(p_sigma / q_sigma)\n + 0.5 * (d * q_sigma**2 + q_mu_minus_p_mu_squared) / (p_sigma**2)\n - 0.5 * d\n )\n\n def log_pxh_given_z0_without_constants(\n self,\n representations: List[Dict],\n z_t: List[Tensor],\n eps_xh: List[Tensor],\n net_eps_xh: List[Tensor],\n gamma_t: Tensor,\n epsilon: float = 1e-10,\n ) -> List[List[Tensor]]:\n # Compute sigma_0 and rescale to the integer scale of the data.\n # for pos\n log_p_x_given_z0_without_constants = [\n -0.5\n * (\n utils.sum_except_batch(\n (eps_xh[ii][:, : self.pos_dim] - net_eps_xh[ii][:, : self.pos_dim])\n ** 2,\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n )\n for ii in range(len(representations))\n ]\n\n # only keep first several elements\n z_t = [_z_t[:, : 3 + 5 + 1] for _z_t in z_t]\n for ii, repr in enumerate(representations):\n representations[ii][\"charge\"] = representations[ii][\"charge\"][:, :1]\n # for ohe of atom types\n sigma_0 = self.schedule.sigma(gamma_t, target_tensor=z_t[0])\n sigma_0_cat = sigma_0 * self.normalizer.norm_values[1]\n atoms = [\n self.normalizer.unnormalize(repr[\"one_hot\"], ind=1)\n for repr in representations\n ]\n est_atoms = [\n self.normalizer.unnormalize(_z_t[:, self.pos_dim : -1], ind=1)\n for _z_t in z_t\n ]\n centered_atoms = [_est_atoms - 1 for _est_atoms in est_atoms]\n log_ph_cat_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_atoms[ii] + 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_atoms[ii] - 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_probabilities = [\n _log_ph_cat_proportionals\n - torch.logsumexp(\n _log_ph_cat_proportionals,\n dim=1,\n keepdim=True,\n )\n for _log_ph_cat_proportionals in 
log_ph_cat_proportionals\n ]\n log_p_hcat_given_z0 = [\n utils.sum_except_batch(\n log_probabilities[ii] * atoms[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n # for atom charge\n sigma_0_charge = sigma_0 * self.normalizer.norm_values[2]\n charges = [\n self.normalizer.unnormalize(repr[\"charge\"], ind=2)\n for repr in representations\n ]\n est_charges = [\n self.normalizer.unnormalize(_z_t[:, -1:], ind=2).long() for _z_t in z_t\n ]\n for ii in range(len(representations)):\n assert charges[ii].size() == est_charges[ii].size()\n centered_charges = [\n charges[ii] - est_charges[ii] for ii in range(len(representations))\n ]\n log_ph_charge_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_charges[ii] + 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_charges[ii] - 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_p_hcharge_given_z0 = [\n utils.sum_except_batch(\n log_ph_charge_proportionals[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n log_p_h_given_z0 = [\n log_p_x_given_z0_without_constants,\n log_p_hcat_given_z0,\n log_p_hcharge_given_z0,\n ]\n return log_p_h_given_z0\n\n # ------ INVERSE PASS ------\n\n @torch.no_grad()\n def sample(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n timesteps: Optional[int] = None,\n h0: Optional[List[Tensor]] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert h0 is not None if self.pos_only else True\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.\n for s in reversed(range(0, timesteps)):\n s_array = torch.full((n_samples, 1), fill_value=s, device=zt_xh[0].device)\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n # print(s, zt_xh)\n\n zt_xh = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n # save frame\n if (s * return_frames) % timesteps == 0:\n idx = (s * return_frames) // timesteps\n out_samples[idx] = 
self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zs_given_zt(\n self,\n s: Tensor,\n t: Tensor,\n zt_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ):\n \"\"\"Samples from zs ~ p(zs | zt). Only used during sampling.\"\"\"\n gamma_s = self.schedule.gamma_module(s)\n gamma_t = self.schedule.gamma_module(t)\n\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zt_xh[0])\n\n sigma_s = self.schedule.sigma(gamma_s, target_tensor=zt_xh[0])\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=zt_xh[0])\n\n # Neural net prediction.\n combined_mask = torch.cat(masks)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=zt_xh,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_net_eps_xh[:, : self.pos_dim] for _net_eps_xh in net_eps_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n # Note: mu_{t->s} = 1 / alpha_{t|s} z_t - sigma_{t|s}^2 / sigma_t / alpha_{t|s} epsilon\n # follows from the definition of mu_{t->s} and Equ. 
(7) in the EDM paper\n mu = [\n zt_xh[ii] / alpha_t_given_s[masks[ii]]\n - net_eps_xh[ii] * (sigma2_t_given_s / alpha_t_given_s / sigma_t)[masks[ii]]\n for ii in range(len(zt_xh))\n ]\n\n # Compute sigma for p(zs | zt).\n sigma = sigma_t_given_s * sigma_s / sigma_t\n\n # Sample zs given the paramters derived from zt.\n zs_xh = self.sample_normal(mu=mu, sigma=sigma, masks=masks, fix_noise=fix_noise)\n\n # Project down to avoid numerical runaway of the center of gravity.\n for ii in range(len(masks)):\n zs_xh[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zs_xh[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zs_xh\n\n def sample_normal(\n self,\n mu: List[Tensor],\n sigma: Tensor,\n masks: List[Tensor],\n fix_noise: bool = False,\n ) -> List[Tensor]:\n r\"\"\"Samples from a Normal distribution.\"\"\"\n if fix_noise:\n # bs = 1 if fix_noise else mu.size(0)\n raise NotImplementedError(\"fix_noise option isn't implemented yet\")\n eps_xh = self.sample_combined_position_feature_noise(masks=masks)\n zs_xh = [mu[ii] + sigma[masks[ii]] * eps_xh[ii] for ii in range(len(masks))]\n return zs_xh\n\n def sample_p_xh_given_z0(\n self,\n z0_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n batch_size: int,\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ) -> Tuple[List[Tensor]]:\n \"\"\"Samples x ~ p(x|z0).\"\"\"\n t_zeros = torch.zeros(size=(batch_size, 1), device=z0_xh[0].device)\n gamma_0 = self.schedule.gamma_module(t_zeros)\n # Computes sqrt(sigma_0^2 / alpha_0^2)\n sigma_x = self.schedule.SNR(-0.5 * gamma_0)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z0_xh,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=torch.cat(masks),\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n # Compute mu for p(zs | zt).\n mu_x = self.compute_x_pred(\n net_eps_xh=net_eps_xh,\n zt_xh=z0_xh,\n gamma_t=gamma_0,\n masks=masks,\n )\n x0_xh = self.sample_normal(\n mu=mu_x, sigma=sigma_x, masks=masks, fix_noise=fix_noise\n )\n\n pos_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, : self.pos_dim], ii)\n for ii in range(len(masks))\n ]\n cat_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, self.pos_dim : -1], ii)\n for ii in range(len(masks))\n ]\n charge_0 = [\n torch.round(self.normalizer.unnormalize(x0_xh[ii][:, -1:], ii)).long()\n for ii in range(len(masks))\n ]\n\n cat_0 = [\n F.one_hot(torch.argmax(cat_0[ii], dim=1), self.node_nfs[ii] - 4).long()\n for ii in range(len(masks))\n ]\n return pos_0, cat_0, charge_0\n\n def compute_x_pred(\n self,\n net_eps_xh: List[Tensor],\n zt_xh: List[Tensor],\n gamma_t: Tensor,\n masks: List[Tensor],\n ) -> List[Tensor]:\n \"\"\"Commputes x_pred, i.e. the most likely prediction of x.\"\"\"\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=net_eps_xh[0])\n alpha_t = self.schedule.alpha(gamma_t, target_tensor=net_eps_xh[0])\n x_pred = [\n 1.0 / alpha_t[masks[ii]] * (zt_xh[ii] - sigma_t[masks[ii]] * net_eps_xh[ii])\n for ii in range(len(masks))\n ]\n return x_pred\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. 
Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n 
torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint_fixed(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n 
gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zt_given_zs(\n self,\n zs: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n gamma_s: Tensor,\n fix_noise: bool = False,\n ) -> List[Tensor]:\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zs[0])\n\n mu = [alpha_t_given_s[masks[ii]] * zs[ii] for ii in range(len(masks))]\n zt = self.sample_normal(\n mu=mu, sigma=sigma_t_given_s, masks=masks, fix_noise=fix_noise\n )\n\n for ii in range(len(masks)):\n zt[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zt[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zt" }, { "identifier": "average_over_batch_metrics", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def average_over_batch_metrics(batch_metrics: List[Dict], allowed: List = []):\n epoch_metrics = {}\n effective_batch = {}\n for ii, out in enumerate(batch_metrics):\n for k, v in out.items():\n if not (k in allowed or len(allowed) == 0):\n continue\n if ii == 0:\n epoch_metrics[k] = v\n effective_batch[k] = 1\n else:\n if not np.isnan(v):\n epoch_metrics[k] += v\n effective_batch[k] += 1\n for k in epoch_metrics:\n epoch_metrics[k] /= effective_batch[k]\n return epoch_metrics" }, { "identifier": "pretty_print", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def pretty_print(epoch, metric_dict, prefix=\"Train\"):\n out = f\"{prefix} epoch {epoch} \"\n for k, v in metric_dict.items():\n out += f\"{k} {v:.2f} \"\n print(out)" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" } ]
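For orientation, the diffusion pieces quoted in the context list above compose as follows. A minimal sketch, assuming the oa_reactdiff package from this record's repository is importable; the argument values mirror the defaults visible in the DDPMModule constructor below, not a verbatim excerpt:

from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule
from oa_reactdiff.diffusion._normalizer import Normalizer

# Lookup table of gamma(t) for a fixed (non-learned) schedule.
gamma_module = PredefinedNoiseSchedule(
    noise_schedule="polynomial_2", timesteps=1000, precision=1e-5
)
# Wraps gamma(t) with alpha/sigma helpers and sanity-checks the norm values.
schedule = DiffSchedule(gamma_module=gamma_module, norm_values=(1.0, 1.0, 1.0))
# Rescales pos / one-hot / charge features before diffusion.
normalizer = Normalizer(
    norm_values=(1.0, 1.0, 1.0), norm_biases=(0.0, 0.0, 0.0), pos_dim=3
)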
import copy
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR
from pytorch_lightning import LightningModule
from torchmetrics.classification import (
    BinaryAccuracy,
    BinaryAUROC,
    BinaryF1Score,
    BinaryPrecision,
    BinaryCohenKappa,
)
from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError

from oa_reactdiff.dataset import (
    ProcessedQM9,
    ProcessedDoubleQM9,
    ProcessedTripleQM9,
    ProcessedTS1x,
)
from oa_reactdiff.dynamics import EGNNDynamics, Confidence
from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule
from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING
from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion
from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print
from oa_reactdiff.analyze.rmsd import batch_rmsd
import oa_reactdiff.utils.training_tools as utils
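Given these imports, a small hedged sketch of what the schedule lookup yields; the formulas follow DiffSchedule.alpha and DiffSchedule.sigma quoted above, and gamma_module is constructed the same way as in the code below:

import torch
from oa_reactdiff.diffusion._schedule import PredefinedNoiseSchedule

gamma_module = PredefinedNoiseSchedule("polynomial_2", timesteps=1000, precision=1e-5)
t = torch.rand(16, 1)                           # normalized timesteps in [0, 1]
gamma_t = gamma_module(t)                       # table lookup at round(t * timesteps)
alpha_t = torch.sqrt(torch.sigmoid(-gamma_t))   # matches DiffSchedule.alpha
sigma_t = torch.sqrt(torch.sigmoid(gamma_t))    # matches DiffSchedule.sigma
# alpha^2 + sigma^2 == 1 by construction (variance-preserving schedule).
assert torch.allclose(alpha_t**2 + sigma_t**2, torch.ones_like(alpha_t))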
20,037
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, ) gamma_module = PredefinedNoiseSchedule( noise_schedule=noise_schedule, timesteps=timesteps, precision=precision, )
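The cropped code above ends just before the target line; the classes quoted in the context list then assemble into the full diffusion module roughly as below. This is a sketch, not the repository's verbatim code: egnn_dynamics, schedule, and normalizer are the objects built earlier in the constructor, and note that the quoted EnVariationalDiffusion constructor really does spell its parameter "schdule":

ddpm = EnVariationalDiffusion(
    dynamics=egnn_dynamics,   # EGNNDynamics built a few lines up
    schdule=schedule,         # sic: the parameter name is misspelled in the source
    normalizer=normalizer,
    size_histogram=None,
    loss_type=loss_type,
    pos_only=pos_only,
    fixed_idx=fixed_idx,
)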
schedule = DiffSchedule(gamma_module=gamma_module, norm_values=norm_values)
6
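A hedged sketch of how a consumer might use this index, with the field semantics assumed from the schema at the top of the file; record is a hypothetical dict holding one row:

gold = record["context"][record["gold_snippet_index"]]  # index 6 for this row
print(gold["identifier"], gold["path"])  # the definition the gold next_line depends on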
2023-10-30 02:53:38+00:00
24k
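Before the next record, a small usage sketch for the metric helpers quoted in the previous context list; the input values here are made up:

from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print

batch_metrics = [
    {"loss": 1.0, "rmsd": 0.2},
    {"loss": 0.5, "rmsd": float("nan")},  # NaN entries are skipped in the average
]
epoch_metrics = average_over_batch_metrics(batch_metrics)
pretty_print(epoch=3, metric_dict=epoch_metrics, prefix="Val")
# prints: Val epoch 3 loss 0.75 rmsd 0.20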
nv-tlabs/pacer
poselib/poselib/skeleton/tests/test_skeleton.py
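The context list below quotes SkeletonTree and SkeletonState from poselib. Condensed from the docstring examples quoted below, a basic workflow looks like this (the import path is inferred from the snippet's path field, so treat it as an assumption):

import torch
from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonState

tree = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)  # bundled ant.xml
zero_pose = SkeletonState.zero_pose(tree)  # identity rotations, zero root translation
local_rotation = zero_pose.local_rotation.clone()
local_rotation[2] = torch.tensor([0.0, 0.0, 1.0, 0.0])  # bend one joint (xyzw quat)
new_pose = SkeletonState.from_rotation_and_root_translation(
    skeleton_tree=tree, r=local_rotation, t=zero_pose.root_translation, is_local=True
)
print(new_pose.global_rotation)  # the local change propagates down the kinematic chain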
[ { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\"\n )\n\n def __init__(self, node_names, parent_indices, local_translation, local_xml_rotation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._local_xml_rotation = local_xml_rotation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n def __repr__(self):\n return (\n \"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n 
self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n )\n )\n\n def _indent(self, s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args,\n **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args,\n **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_xml_rotation\"], *args,\n **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict(\n [\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n (\"local_xml_rotation\", tensor_to_dict(self._local_xml_rotation)),\n ]\n )\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n local_xml_rotation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n quat = np.fromstring(xml_node.attrib.get(\"quat\", \"1 0 0 0\"), dtype=float, sep=\" \")[[1, 2, 3, 0]]\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n local_xml_rotation.append(quat)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n torch.from_numpy(np.array(local_xml_rotation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n 
new_local_translation = torch.zeros(\n new_length, 3, dtype=self.local_translation.dtype\n )\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n while tb_node_index != -1 and self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[\n tb_node_index, node_index, :\n ]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[\n self[tb_node_index]\n ]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(\n self, node_names: List[str], pairwise_translation=None\n ) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... )\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of ant's leg is bent\n [plot of the ant with the new pose\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. 
from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translatio\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., : self.num_joints * 4].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 4))\n )\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? \n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[\n ..., self.num_joints * 4 : self.num_joints * 4 + 3\n ]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n # Forward kinemaitcs.\n \n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation.clone()\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n \n local_transformation[..., :4] = quat_mul(\n self.skeleton_tree._local_xml_rotation,\n local_transformation[..., :4])\n\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(\n local_transformation[..., node_index, :]\n )\n else:\n # Here to factor in the local xml rotation\n\n global_transformation.append(\n transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n )\n )\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return 
self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(\n self.global_transformation\n )\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[\n ..., node_index, :\n ]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(\n r=self.local_rotation, t=self.local_translation\n )\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. 
The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (\n tuple(self.tensor.shape[:-1])\n + (len(self.skeleton_tree),)\n + tuple(self.skeleton_tree.local_translation.shape[-1:])\n )\n local_translation = self.skeleton_tree.local_translation.broadcast_to(\n *broadcast_shape\n ).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (\n self.global_translation - self.root_translation.unsqueeze(-1)\n )\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (\n quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation\n )\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (\n global_positions[:, left_shoulder_index].numpy()\n - global_positions[:, right_shoulder_index].numpy()\n + global_positions[:, left_hip_index].numpy()\n - global_positions[:, right_hip_index].numpy()\n )\n side_direction = (\n side_direction\n / np.sqrt((side_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(\n forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\"\n )\n forward_direction = (\n forward_direction\n / np.sqrt((forward_direction ** 2).sum(axis=-1))[..., np.newaxis]\n )\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n # Tensorbackend: local rotation and translation, rotation is is in quat 33 * 4 + 3\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(\n *(state_shape + (-1,))\n )\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(\n cls: 
Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonState\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ]\n )\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (\n r.dim() > 0\n ), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n return cls(\n SkeletonState._to_state_vector(r, t),\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. 
\n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (\n transform_translation(transform_mul(p1, p2))\n .reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3)\n .mean(axis=0)\n )\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(\n node_names, pairwise_translation\n )\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(\n self, node_names: List[str], estimate_local_translation_from_states: bool = True\n ) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(\n self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree\n ):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(\n list(joint_mapping_inv)\n )\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (\n len(set(n_joints)) == 1\n ), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(\n map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n )\n )\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(\n node_names, pairwise_translation\n )\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :]\n )\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(\n rotation_to_target_skeleton, source_state.local_rotation[..., 0, :]\n )\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (\n source_state.root_translation - source_tpose.root_translation\n ) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state 
relative to source tpose and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[\n current_index, :\n ] = target_tpose.global_rotation[\n target_tpose.skeleton_tree.index(name), :\n ]\n\n global_rotation_diff = quat_mul_norm(\n source_state.global_rotation, quat_inverse(source_tpose.global_rotation)\n )\n new_global_rotation = quat_mul_norm(\n global_rotation_diff, target_tpose_global_rotation\n )\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[\n :, parent_index, :\n ]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (\n len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0\n ), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(\n self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps\n )\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index : curr_index + self.num_joints * 3].reshape(\n *(self.tensor.shape[:-1] + (self.num_joints, 3))\n )\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector, skeleton_tree=skeleton_tree, is_local=is_local, fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(\n cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int\n ):\n \"\"\"\n Construct a skeleton motion from a 
skeleton state. The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n\n assert (\n type(skeleton_state) == SkeletonState\n ), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n\n global_velocity = SkeletonMotion._compute_velocity(\n p=skeleton_state.global_translation, time_delta=1 / fps\n )\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(\n r=skeleton_state.global_rotation, time_delta=1 / fps\n )\n\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(\n cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs\n ) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(\n dict_repr[\"global_angular_velocity\"], *args, **kwargs\n )\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(\n dict_repr[\"skeleton_tree\"], *args, **kwargs\n ),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict(\n [\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ]\n )\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(\n fbx_file_path, fbx_configs, root_joint, fps\n )\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(\n transformation_matrix=torch.from_numpy(\n np.swapaxes(np.array(transforms), -1, -2),\n ).float()\n )\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(\n -1, len(joint_parents), 3\n )[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree, r=local_rotation, t=root_translation, is_local=True\n )\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(\n skeleton_state=skeleton_state, fps=fps\n )\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n np.gradient(p.numpy(), axis=-3), 2, axis=-3, mode=\"nearest\"\n )\n / time_delta,\n )\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(\n r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :])\n )\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n angular_velocity = torch.from_numpy(\n filters.gaussian_filter1d(\n angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"\n ),\n )\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\n \"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps)\n )\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "plot_skeleton_state", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_state(skeleton_state, task_name=\"\"):\n \"\"\"\n Visualize a skeleton state\n\n :param skeleton_state:\n :param task_name:\n :type skeleton_state: SkeletonState\n :type task_name: string, optional\n \"\"\"\n logger.info(\"plotting {}\".format(task_name))\n task = Draw3DSkeletonState(task_name=task_name, skeleton_state=skeleton_state)\n plotter = Matplotlib3DPlotter(task)\n plotter.show()" }, { "identifier": "plot_skeleton_motion_interactive", "path": "poselib/poselib/visualization/common.py", "snippet": "def plot_skeleton_motion_interactive(skeleton_motion, task_name=\"\"):\n \"\"\"\n Visualize a skeleton motion along its first dimension interactively.\n\n :param skeleton_motion:\n :param task_name:\n :type skeleton_motion: SkeletonMotion\n :type task_name: string, optional\n \"\"\"\n for _ in plot_skeleton_motion_interactive_base(skeleton_motion, task_name):\n pass" }, { "identifier": "Matplotlib3DPlotter", "path": "poselib/poselib/visualization/plt_plotter.py", "snippet": "class Matplotlib3DPlotter(BasePlotter):\n _fig: plt.figure # plt figure\n _ax: p3.Axes3D # plt 3d axis\n # stores artist objects for each task (task name as the key)\n _artist_cache: Dict[str, Any]\n # callables for each task primitives\n 
_create_impl_callables: Dict[str, Callable]\n _update_impl_callables: Dict[str, Callable]\n\n def __init__(self, task: \"BasePlotterTask\") -> None:\n self._fig = plt.figure()\n self._ax = p3.Axes3D(self._fig)\n self._artist_cache = {}\n\n self._create_impl_callables = {\n \"Draw3DLines\": self._lines_create_impl,\n \"Draw3DDots\": self._dots_create_impl,\n \"Draw3DTrail\": self._trail_create_impl,\n }\n self._update_impl_callables = {\n \"Draw3DLines\": self._lines_update_impl,\n \"Draw3DDots\": self._dots_update_impl,\n \"Draw3DTrail\": self._trail_update_impl,\n }\n self._init_lim()\n super().__init__(task)\n\n @property\n def ax(self):\n return self._ax\n\n @property\n def fig(self):\n return self._fig\n\n def show(self):\n plt.show()\n\n def _min(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return min(x, y)\n\n def _max(self, x, y):\n if x is None:\n return y\n if y is None:\n return x\n return max(x, y)\n\n def _init_lim(self):\n self._curr_x_min = None\n self._curr_y_min = None\n self._curr_z_min = None\n self._curr_x_max = None\n self._curr_y_max = None\n self._curr_z_max = None\n\n def _update_lim(self, xs, ys, zs):\n self._curr_x_min = self._min(np.min(xs), self._curr_x_min)\n self._curr_y_min = self._min(np.min(ys), self._curr_y_min)\n self._curr_z_min = self._min(np.min(zs), self._curr_z_min)\n self._curr_x_max = self._max(np.max(xs), self._curr_x_max)\n self._curr_y_max = self._max(np.max(ys), self._curr_y_max)\n self._curr_z_max = self._max(np.max(zs), self._curr_z_max)\n\n def _set_lim(self):\n if not (\n self._curr_x_min is None\n or self._curr_x_max is None\n or self._curr_y_min is None\n or self._curr_y_max is None\n or self._curr_z_min is None\n or self._curr_z_max is None\n ):\n self._ax.set_xlim3d(self._curr_x_min, self._curr_x_max)\n self._ax.set_ylim3d(self._curr_y_min, self._curr_y_max)\n self._ax.set_zlim3d(self._curr_z_min, self._curr_z_max)\n self._init_lim()\n\n @staticmethod\n def _lines_extract_xyz_impl(index, lines_task):\n return lines_task[index, :, 0], lines_task[index, :, 1], lines_task[index, :, 2]\n\n @staticmethod\n def _trail_extract_xyz_impl(index, trail_task):\n return (\n trail_task[index : index + 2, 0],\n trail_task[index : index + 2, 1],\n trail_task[index : index + 2, 2],\n )\n\n def _lines_create_impl(self, lines_task):\n color = lines_task.color\n self._artist_cache[lines_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task),\n color=color,\n linewidth=lines_task.line_width,\n alpha=lines_task.alpha\n )[0]\n for i in range(len(lines_task))\n ]\n\n def _lines_update_impl(self, lines_task):\n lines_artists = self._artist_cache[lines_task.task_name]\n for i in range(len(lines_task)):\n artist = lines_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._lines_extract_xyz_impl(i, lines_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if lines_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _dots_create_impl(self, dots_task):\n color = dots_task.color\n self._artist_cache[dots_task.task_name] = self._ax.plot(\n dots_task[:, 0],\n dots_task[:, 1],\n dots_task[:, 2],\n c=color,\n linestyle=\"\",\n marker=\".\",\n markersize=dots_task.marker_size,\n alpha=dots_task.alpha,\n )[0]\n\n def _dots_update_impl(self, dots_task):\n dots_artist = self._artist_cache[dots_task.task_name]\n dots_artist.set_data(dots_task[:, 0], dots_task[:, 1])\n dots_artist.set_3d_properties(dots_task[:, 2])\n if dots_task.influence_lim:\n self._update_lim(dots_task[:, 0], dots_task[:, 1], 
dots_task[:, 2])\n\n def _trail_create_impl(self, trail_task):\n color = trail_task.color\n trail_length = len(trail_task) - 1\n self._artist_cache[trail_task.task_name] = [\n self._ax.plot(\n *Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task),\n color=trail_task.color,\n linewidth=trail_task.line_width,\n alpha=trail_task.alpha * (1.0 - i / (trail_length - 1))\n )[0]\n for i in range(trail_length)\n ]\n\n def _trail_update_impl(self, trail_task):\n trails_artists = self._artist_cache[trail_task.task_name]\n for i in range(len(trail_task) - 1):\n artist = trails_artists[i]\n xs, ys, zs = Matplotlib3DPlotter._trail_extract_xyz_impl(i, trail_task)\n artist.set_data(xs, ys)\n artist.set_3d_properties(zs)\n if trail_task.influence_lim:\n self._update_lim(xs, ys, zs)\n\n def _create_impl(self, task_list):\n for task in task_list:\n self._create_impl_callables[task.task_type](task)\n self._draw()\n\n def _update_impl(self, task_list):\n for task in task_list:\n self._update_impl_callables[task.task_type](task)\n self._draw()\n\n def _set_aspect_equal_3d(self):\n xlim = self._ax.get_xlim3d()\n ylim = self._ax.get_ylim3d()\n zlim = self._ax.get_zlim3d()\n\n xmean = np.mean(xlim)\n ymean = np.mean(ylim)\n zmean = np.mean(zlim)\n\n plot_radius = max(\n [\n abs(lim - mean_)\n for lims, mean_ in ((xlim, xmean), (ylim, ymean), (zlim, zmean))\n for lim in lims\n ]\n )\n\n self._ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])\n self._ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])\n self._ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])\n\n def _draw(self):\n self._set_lim()\n self._set_aspect_equal_3d()\n self._fig.canvas.draw()\n self._fig.canvas.flush_events()\n plt.pause(0.00001)" }, { "identifier": "Draw3DSkeletonMotion", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonMotion(BasePlotterTask):\n def __init__(\n self,\n task_name: str,\n skeleton_motion,\n frame_index=None,\n joints_color=\"red\",\n lines_color=\"blue\",\n velocity_color=\"green\",\n angular_velocity_color=\"purple\",\n trail_color=\"black\",\n trail_length=10,\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonMotion\")\n self._trail_length = trail_length\n self._skeleton_motion = skeleton_motion\n # if frame_index is None:\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = self._skeleton_motion.tensor[frame_index, :]\n # else:\n # curr_skeleton_motion = self._skeleton_motion[frame_index, :]\n self._skeleton_state_task = Draw3DSkeletonState(\n self.get_scoped_name(\"skeleton_state\"),\n curr_skeleton_motion,\n joints_color=joints_color,\n lines_color=lines_color,\n alpha=alpha,\n )\n vel_lines, avel_lines = Draw3DSkeletonMotion._get_vel_and_avel(\n curr_skeleton_motion\n )\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(trail_length, axis=0)\n self._vel_task = Draw3DLines(\n self.get_scoped_name(\"velocity\"),\n vel_lines,\n velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._avel_task = Draw3DLines(\n self.get_scoped_name(\"angular_velocity\"),\n avel_lines,\n angular_velocity_color,\n influence_lim=False,\n alpha=alpha,\n )\n self._com_trail_task = Draw3DTrail(\n self.get_scoped_name(\"com_trail\"),\n self._com_pos,\n trail_color,\n marker_size=2,\n influence_lim=True,\n alpha=alpha,\n )\n\n @property\n def name(self):\n return \"3DSkeletonMotion\"\n\n def update(self, 
frame_index=None, reset_trail=False, skeleton_motion=None) -> None:\n if skeleton_motion is not None:\n self._skeleton_motion = skeleton_motion\n\n curr_skeleton_motion = self._skeleton_motion.clone()\n if frame_index is not None:\n curr_skeleton_motion.tensor = curr_skeleton_motion.tensor[frame_index, :]\n if reset_trail:\n self._com_pos = curr_skeleton_motion.root_translation.numpy()[\n np.newaxis, ...\n ].repeat(self._trail_length, axis=0)\n else:\n self._com_pos = np.concatenate(\n (\n curr_skeleton_motion.root_translation.numpy()[np.newaxis, ...],\n self._com_pos[:-1],\n ),\n axis=0,\n )\n self._skeleton_state_task.update(curr_skeleton_motion)\n self._com_trail_task.update(self._com_pos)\n self._update(*Draw3DSkeletonMotion._get_vel_and_avel(curr_skeleton_motion))\n\n @staticmethod\n def _get_vel_and_avel(skeleton_motion):\n \"\"\"Get all the velocity and angular velocity lines\n \"\"\"\n pos = skeleton_motion.global_translation.numpy()\n vel = skeleton_motion.global_velocity.numpy()\n avel = skeleton_motion.global_angular_velocity.numpy()\n\n vel_lines = np.stack((pos, pos + vel * 0.02), axis=1)\n avel_lines = np.stack((pos, pos + avel * 0.01), axis=1)\n return vel_lines, avel_lines\n\n def _update(self, vel_lines, avel_lines) -> None:\n self._vel_task.update(vel_lines)\n self._avel_task.update(avel_lines)\n\n def __iter__(self):\n yield from self._skeleton_state_task\n yield from self._vel_task\n yield from self._avel_task\n yield from self._com_trail_task" }, { "identifier": "Draw3DSkeletonState", "path": "poselib/poselib/visualization/skeleton_plotter_tasks.py", "snippet": "class Draw3DSkeletonState(BasePlotterTask):\n _lines_task: Draw3DLines # sub-task for drawing lines\n _dots_task: Draw3DDots # sub-task for drawing dots\n\n def __init__(\n self,\n task_name: str,\n skeleton_state,\n joints_color: str = \"red\",\n lines_color: str = \"blue\",\n alpha=1.0,\n ) -> None:\n super().__init__(task_name=task_name, task_type=\"3DSkeletonState\")\n lines, dots = Draw3DSkeletonState._get_lines_and_dots(skeleton_state)\n self._lines_task = Draw3DLines(\n self.get_scoped_name(\"bodies\"), lines, joints_color, alpha=alpha\n )\n self._dots_task = Draw3DDots(\n self.get_scoped_name(\"joints\"), dots, lines_color, alpha=alpha\n )\n\n @property\n def name(self):\n return \"3DSkeleton\"\n\n def update(self, skeleton_state) -> None:\n self._update(*Draw3DSkeletonState._get_lines_and_dots(skeleton_state))\n\n @staticmethod\n def _get_lines_and_dots(skeleton_state):\n \"\"\"Get all the lines and dots needed to draw the skeleton state\n \"\"\"\n assert (\n len(skeleton_state.tensor.shape) == 1\n ), \"the state has to be zero dimensional\"\n dots = skeleton_state.global_translation.numpy()\n skeleton_tree = skeleton_state.skeleton_tree\n parent_indices = skeleton_tree.parent_indices.numpy()\n lines = []\n for node_index in range(len(skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index != -1:\n lines.append([dots[node_index], dots[parent_index]])\n lines = np.array(lines)\n return lines, dots\n\n def _update(self, lines, dots) -> None:\n self._lines_task.update(lines)\n self._dots_task.update(dots)\n\n def __iter__(self):\n yield from self._lines_task\n yield from self._dots_task" } ]
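The SkeletonState.local_rotation property quoted in the context above recovers each joint's local rotation by dividing out the parent's global rotation (local = parent_global^{-1} * child_global). A minimal, self-contained sketch of that step follows; quat_inverse and quat_mul here are simplified stand-ins for poselib's quat_inverse/quat_mul_norm, and the 3-joint chain with random rotations is made up for illustration.

import torch

# Simplified quaternion helpers in (x, y, z, w) layout; stand-ins for
# poselib's quat_inverse / quat_mul_norm, written here only for illustration.
def quat_inverse(q):
    # conjugate of a unit quaternion inverts the rotation
    return torch.cat([-q[..., :3], q[..., 3:]], dim=-1)

def quat_mul(a, b):
    ax, ay, az, aw = a.unbind(-1)
    bx, by, bz, bw = b.unbind(-1)
    return torch.stack([
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by - ax * bz + ay * bw + az * bx,
        aw * bz + ax * by - ay * bx + az * bw,
        aw * bw - ax * bx - ay * by - az * bz,
    ], dim=-1)

# Hypothetical 3-joint chain: root -> child -> grandchild, random global rotations.
parent_indices = [-1, 0, 1]
global_rot = torch.nn.functional.normalize(torch.randn(3, 4), dim=-1)

local_rot = torch.empty_like(global_rot)
for j, p in enumerate(parent_indices):
    if p == -1:
        local_rot[j] = global_rot[j]  # the root keeps its global rotation
    else:
        # local = parent_global^{-1} * child_global, as in local_rotation above
        local_rot[j] = quat_mul(quat_inverse(global_rot[p]), global_rot[j])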
from ...core import *
from ..skeleton3d import SkeletonTree, SkeletonState, SkeletonMotion
from ...visualization.common import (
    plot_skeleton_state,
    plot_skeleton_motion_interactive,
)
from ...visualization.plt_plotter import Matplotlib3DPlotter
from ...visualization.skeleton_plotter_tasks import (
    Draw3DSkeletonMotion,
    Draw3DSkeletonState,
)
import numpy as np
import torch
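SkeletonMotion._compute_velocity in the context above estimates joint velocities with a time-axis gradient followed by Gaussian smoothing. Below is a small sketch of the same recipe; the array shapes are made up, and scipy.ndimage.gaussian_filter1d is used directly (the snippet reaches the same function through the older scipy.ndimage.filters module).

import numpy as np
from scipy.ndimage import gaussian_filter1d

fps = 30
positions = np.random.randn(120, 34, 3)  # (frames, joints, xyz); hypothetical shape

# Gradient along the time axis, then a Gaussian pass with sigma=2 as in the snippet.
# Multiplying by fps is the same as dividing by time_delta (= 1 / fps).
velocity = gaussian_filter1d(
    np.gradient(positions, axis=0), sigma=2, axis=0, mode="nearest"
) * fps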
18,248
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True )
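For the angular counterpart, SkeletonMotion._compute_angular_velocity forms frame-to-frame difference quaternions dq = q_{t+1} * q_t^{-1}, converts each to an angle-axis pair, and takes omega ~ angle * axis / time_delta before smoothing. A toy numpy version of the angle-axis step, again in the (x, y, z, w) layout used by the stand-in helpers above; the sample quaternion and fps are made up.

import numpy as np

def quat_angle_axis(q):
    # q is a unit quaternion in (x, y, z, w) layout
    w = np.clip(q[..., 3], -1.0, 1.0)
    angle = 2.0 * np.arccos(w)
    sin_half = np.sqrt(np.maximum(1.0 - w ** 2, 1e-12))
    axis = q[..., :3] / sin_half[..., None]
    return angle, axis

dq = np.array([0.0, 0.0, np.sin(0.05), np.cos(0.05)])  # 0.1 rad twist about z
angle, axis = quat_angle_axis(dq)
omega = angle * axis / (1.0 / 30.0)  # roughly [0, 0, 3] rad/s at 30 fps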
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # NVIDIA CORPORATION and its licensors retain all intellectual property # and proprietary rights in and to this software, related documentation # and any modifications thereto. Any use, reproduction, disclosure or # distribution of this software and related documentation without an express # license agreement from NVIDIA CORPORATION is strictly prohibited. def test_skel_tree(): skel_tree = SkeletonTree.from_mjcf( "/home/serfcx/DL_Animation/rl_mimic/data/skeletons/humanoid_mimic_mod_2_noind.xml", backend="pytorch", ) skel_tree_rec = SkeletonTree.from_dict(skel_tree.to_dict(), backend="pytorch") # assert skel_tree.to_str() == skel_tree_rec.to_str() print(skel_tree.node_names) print(skel_tree.local_translation) print(skel_tree.parent_indices) skel_state = SkeletonState.zero_pose(skeleton_tree=skel_tree) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) skel_state = skel_state.drop_nodes_by_names(["right_hip", "left_hip"]) plot_skeleton_state(task_name="draw_skeleton", skeleton_state=skel_state) def test_skel_motion(): skel_motion = SkeletonMotion.from_file( "/tmp/tmp.npy", backend="pytorch", load_context=True )
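The retarget_to docstring in the context above is driven by a joint_mapping dict from source to target joint names plus a unit-scale factor. A sketch of what those inputs look like; the joint names are hypothetical, and the 0.01 factor is the cm-to-m example given in the docstring itself.

joint_mapping = {
    # source joint name -> target joint name (hypothetical names)
    "mixamorig:Hips": "pelvis",
    "mixamorig:LeftUpLeg": "left_thigh",
    "mixamorig:RightUpLeg": "right_thigh",
}
scale_to_target_skeleton = 0.01  # e.g. source skeleton in cm, target in m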
plot_skeleton_motion_interactive(skel_motion)
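SkeletonMotion.crop, also quoted above, resamples by slicing with an integer stride, which is why the target fps must divide the original fps. The bookkeeping, with hypothetical values:

old_fps, new_fps = 120, 30
assert old_fps % new_fps == 0, "resampling only supports integer fps division"
skip_every = old_fps // new_fps  # keep every 4th frame
frames = list(range(12))
print(frames[2:10:skip_every])  # crop(start=2, end=10) at 30 fps -> [2, 6]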
4
2023-10-31 20:47:12+00:00
24k
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n if self.randomize:\n logger.warning(f'Domain randomization is enabled!')\n self.randomization_params = self.cfg[\"task\"][\"randomization_params\"]\n self.aggregate_mode = self.cfg[\"env\"][\"aggregateMode\"]\n\n self.dist_reward_scale = self.cfg[\"env\"][\"rew\"][\"distRewardScale\"]\n self.rot_reward_scale = self.cfg[\"env\"][\"rew\"][\"rotRewardScale\"]\n self.success_tolerance = self.cfg[\"env\"][\"rew\"][\"successTolerance\"]\n self.reach_goal_bonus = self.cfg[\"env\"][\"rew\"][\"reachGoalBonus\"]\n self.fall_dist = self.cfg[\"env\"][\"rew\"][\"fallDistance\"]\n self.fall_penalty = self.cfg[\"env\"][\"rew\"][\"fallPenalty\"]\n self.rot_eps = self.cfg[\"env\"][\"rew\"][\"rotEps\"]\n\n self.vel_obs_scale = 0.2 # scale factor of velocity based observations\n self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations\n\n self.reset_position_noise = self.cfg[\"env\"][\"resetPositionNoise\"]\n self.reset_rotation_noise = self.cfg[\"env\"][\"resetRotationNoise\"]\n self.reset_dof_pos_noise = self.cfg[\"env\"][\"resetDofPosRandomInterval\"]\n self.reset_dof_vel_noise = self.cfg[\"env\"][\"resetDofVelRandomInterval\"]\n\n self.force_scale = self.cfg[\"env\"].get(\"forceScale\", 0.0)\n self.force_prob_range = self.cfg[\"env\"].get(\"forceProbRange\", [0.001, 0.1])\n self.force_decay = self.cfg[\"env\"].get(\"forceDecay\", 0.99)\n self.force_decay_interval = self.cfg[\"env\"].get(\"forceDecayInterval\", 0.08)\n\n self.dclaw_dof_speed_scale = self.cfg[\"env\"][\"dofSpeedScale\"]\n # self.act_moving_average = self.cfg[\"env\"][\"actionsMovingAverage\"]\n\n self.debug_viz = self.cfg[\"env\"][\"enableDebugVis\"]\n\n self.max_episode_length = self.cfg[\"env\"][\"episodeLength\"]\n self.reset_time = self.cfg[\"env\"].get(\"resetTime\", -1.0)\n self.print_success_stat = self.cfg[\"env\"][\"printNumSuccesses\"]\n self.max_consecutive_successes = self.cfg[\"env\"][\"maxConsecutiveSuccesses\"]\n self.av_factor = self.cfg[\"env\"].get(\"averFactor\", 0.1)\n\n self.object_type = self.cfg[\"env\"][\"objectType\"]\n\n self.asset_files_dict = {\n \"block\": \"urdf/objects/cube_multicolor.urdf\",\n \"egg\": \"mjcf/open_ai_assets/hand/egg.xml\",\n \"airplane\": \"single_objects/airplane/model.urdf\",\n 'power_drill': 'single_objects/power_drill/model.urdf',\n 'mug': 'single_objects/mug/model.urdf',\n 'elephant': 'asymm/train/elephant/var_000/model.urdf',\n 'train': 'asymm/train/train/var_000/model.urdf',\n 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf'\n\n }\n self.objs_in_isaacgym = ['block', 'egg']\n\n if \"asset\" in self.cfg[\"env\"]:\n self.asset_files_dict[\"block\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameBlock\",\n self.asset_files_dict[\"block\"])\n self.asset_files_dict[\"egg\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameEgg\",\n self.asset_files_dict[\"egg\"])\n\n self.obs_type = self.cfg[\"env\"][\"observationType\"]\n\n if not (self.obs_type in [\"full_no_vel\", \"full\", \"full_state\"]):\n raise Exception(\n \"Unknown type of observations!\\nobservationType should be one of: [openai, full_no_vel, full, full_state]\")\n\n print(\"Obs type:\", self.obs_type)\n\n ## TODO: change value here\n self.num_obs_dict = {\n \"full_no_vel\": 42,\n \"full\": 87,\n 
\"full_state\": 114\n }\n\n self.up_axis = 'z'\n\n num_states = 0\n\n self.cfg[\"env\"][\"numObservations\"] = self.num_obs_dict[self.obs_type]\n self.cfg[\"env\"][\"numStates\"] = num_states\n self.cfg[\"env\"][\"numActions\"] = 12\n self.hist_buf_reset_env_ids = None\n\n super().__init__(config=self.cfg,\n sim_device=sim_device,\n rl_device=rl_device,\n graphics_device_id=graphics_device_id,\n headless=headless)\n\n self.dt = self.sim_params.dt\n control_freq_inv = self.cfg[\"env\"].get(\"controlFrequencyInv\", 1)\n if self.reset_time > 0.0:\n self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))\n print(\"Reset time: \", self.reset_time)\n print(\"New episode length: \", self.max_episode_length)\n\n if self.viewer != None:\n cam_pos = gymapi.Vec3(0.16, -0.5, 0.5)\n cam_target = gymapi.Vec3(0.0, 0.0, 0.15)\n self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)\n\n actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)\n dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)\n rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)\n self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)\n\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs,\n self.num_dclaw_dofs)\n\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)\n self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs]\n self.dclaw_dof_pos = self.dclaw_dof_state[..., 0]\n self.dclaw_dof_vel = self.dclaw_dof_state[..., 1]\n if self.cfg.env.dof_torque_on:\n self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1)\n else:\n self.dclaw_dof_torque = None\n\n self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)\n self.num_bodies = self.rigid_body_states.shape[1]\n\n self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)\n\n if self.cfg.env.rew.pen_tb_contact:\n _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim)\n self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3)\n table_handle = self.gym.find_actor_handle(self.envs[0], 'table')\n self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0],\n table_handle,\n 'table',\n gymapi.DOMAIN_ENV)\n logger.warning(f'Table body index:{self.table_body_index}')\n self.table_contact_force = self.net_contact_force[:, self.table_body_index]\n\n self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs\n self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n\n self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)\n\n self.reset_goal_buf = self.reset_buf.clone()\n self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)\n 
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)\n\n self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)\n\n self.total_successes = 0\n self.total_resets = 0\n\n self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)\n self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)\n self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(self.num_envs, device=self.device) + torch.log(\n self.force_prob_range[1]))\n\n self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)\n\n self.num_actions = self.num_dclaw_dofs\n self.actions = self.zero_actions()\n DClawBase.compute_observations(self)\n self.num_observations = self.obs_buf.shape[-1]\n self.cfg.env.numObservations = self.num_observations\n self.create_ob_act_space()\n\n def create_sim(self):\n self.dt = self.cfg[\"sim\"][\"dt\"]\n self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis)\n\n self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)\n self._create_ground_plane()\n self._create_envs(self.num_envs, self.cfg[\"env\"]['envSpacing'], int(np.sqrt(self.num_envs)))\n\n if self.randomize:\n self.apply_randomizations(self.randomization_params)\n\n def _create_ground_plane(self):\n plane_params = gymapi.PlaneParams()\n plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)\n plane_params.distance = 0.1\n self.gym.add_ground(self.sim, plane_params)\n\n def _create_envs(self, num_envs, spacing, num_per_row):\n lower = gymapi.Vec3(-spacing, -spacing, 0.0)\n upper = gymapi.Vec3(spacing, spacing, spacing)\n\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()\n object_asset_file = self.asset_files_dict[self.object_type]\n\n dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)\n table_asset = self.get_table_asset()\n table_pose = self.get_table_pose()\n\n if self.obs_type == \"full_state\":\n sensor_pose = gymapi.Transform()\n for ft_handle in self.fingertip_handles:\n self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)\n\n if self.object_type in self.objs_in_isaacgym:\n asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix()\n else:\n asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix()\n\n object_asset_options = gymapi.AssetOptions()\n if self.cfg.env.vhacd:\n object_asset_options.convex_decomposition_from_submeshes = True\n\n object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n object_asset_options.disable_gravity = True\n goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n dclaw_start_pose = self.get_dclaw_start_pose()\n object_start_pose = self.get_object_start_pose(dclaw_start_pose)\n\n goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)\n\n self.dclaws = []\n self.envs = []\n\n self.object_init_state = []\n self.hand_start_states = []\n\n self.hand_indices = []\n self.fingertip_indices = []\n self.object_indices = []\n self.goal_object_indices = []\n\n self.render_camera_handles = []\n if self.cfg.rgb_render:\n render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n 
print(f'Fingertip handles:{self.fingertip_handles}')\n\n dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)\n object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)\n object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset)\n self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))\n self.object_handles = []\n\n max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1\n max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1\n\n for i in range(self.num_envs):\n env_ptr = self.gym.create_env(\n self.sim, lower, upper, num_per_row\n )\n\n if self.aggregate_mode >= 1:\n self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)\n\n self.create_hand_actor(env_ptr=env_ptr,\n dclaw_asset=dclaw_asset,\n dclaw_start_pose=dclaw_start_pose,\n dclaw_dof_props=dclaw_dof_props,\n env_id=i)\n\n object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, \"object\", i, 0, 1)\n self.object_handles.append(object_handle)\n self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,\n object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,\n object_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)\n self.object_indices.append(object_idx)\n\n goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, \"goal_object\", i + self.num_envs,\n 0, 2)\n goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)\n self.goal_object_indices.append(goal_object_idx)\n\n if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block':\n blockscale = float(self.cfg.env.blockscale)\n self.gym.set_actor_scale(env_ptr, object_handle, blockscale)\n self.gym.set_actor_scale(env_ptr, goal_handle, blockscale)\n\n if self.object_type != \"block\":\n self.gym.set_rigid_body_color(\n env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n self.gym.set_rigid_body_color(\n env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, \"table\", i, 0)\n\n if self.cfg.rgb_render:\n render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)\n self.render_camera_handles.append(render_camera_handle[0])\n\n if self.aggregate_mode > 0:\n self.gym.end_aggregate(env_ptr)\n\n self.envs.append(env_ptr)\n\n self.setup_torch_states()\n\n def create_camera(self, camera_poses, env_ptr, camera_params):\n cam_handles = []\n for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)):\n camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params)\n if isinstance(camera_poses[ic], tuple):\n self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1])\n else:\n self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic])\n cam_handles.append(camera_handle)\n return cam_handles\n\n def get_visual_render_camera_setup(self):\n cam_pos = np.array([-0.7, 0, 0.5])\n cam_focus_pt = np.array([0.08, 0, 0.15])\n cam_focus_pt = gymapi.Vec3(*cam_focus_pt)\n cam_pos = gymapi.Vec3(*cam_pos)\n camera_poses = [(cam_pos, cam_focus_pt)]\n camera_params = get_camera_params(width=self.cfg.cam.visual_render_width,\n height=self.cfg.cam.visual_render_height,\n hov=45,\n cuda=False)\n return camera_poses, camera_params\n\n def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, 
dclaw_dof_props, env_id):\n dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, \"hand\", env_id, 0, 0)\n if self.cfg.env.dof_torque_on:\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.hand_start_states.append(\n [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z,\n dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z,\n dclaw_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props)\n hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM)\n self.hand_indices.append(hand_idx)\n\n self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL)\n if self.obs_type == \"full_state\":\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.dclaws.append(dclaw_actor)\n self.set_hand_color(env_ptr, dclaw_actor)\n\n def set_hand_color(self, env_ptr, dclaw_actor):\n rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor)\n for bd, bd_id in rgd_dict.items():\n if bd not in dclaw_body_color_mapping:\n continue\n color = gymapi.Vec3(*dclaw_body_color_mapping[bd])\n self.gym.set_rigid_body_color(env_ptr, dclaw_actor,\n bd_id, gymapi.MESH_VISUAL,\n color)\n\n def get_table_asset(self):\n asset_options = gymapi.AssetOptions()\n asset_options.armature = 0.001\n asset_options.fix_base_link = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n table_dims = gymapi.Vec3(0.6, 0.6, 0.1)\n table_asset = self.gym.create_box(self.sim,\n table_dims.x,\n table_dims.y,\n table_dims.z,\n asset_options)\n table_props = self.gym.get_asset_rigid_shape_properties(table_asset)\n for p in table_props:\n p.friction = self.cfg.env.table.friction\n p.torsion_friction = self.cfg.env.table.torsion_friction\n p.restitution = self.cfg.env.table.restitution\n p.rolling_friction = self.cfg.env.table.rolling_friction\n self.gym.set_asset_rigid_shape_properties(table_asset, table_props)\n return table_asset\n\n def get_table_pose(self):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n object_start_pose.p.x = 0\n object_start_pose.p.y = 0\n object_start_pose.p.z = -0.05\n return object_start_pose\n\n def get_dclaw_start_pose(self):\n dclaw_start_pose = gymapi.Transform()\n dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx))\n dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi)\n return dclaw_start_pose\n\n def setup_torch_states(self):\n self.render_rgb_obs_buf = None\n if self.cfg.rgb_render:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0))\n else:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0))\n self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(\n self.num_envs, 13)\n self.goal_states = self.object_init_state.clone()\n self.goal_states[:, self.up_axis_idx] -= 0.04\n self.goal_init_state = self.goal_states.clone()\n self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)\n\n self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device)\n self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)\n self.object_rb_masses = None\n self.update_obj_mass()\n self.hand_indices = to_torch(self.hand_indices, 
dtype=torch.long, device=self.device)\n self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)\n self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)\n\n def get_dclaw_asset(self, asset_root=None, asset_options=None):\n # load dclaw asset\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.flip_visual_attachments = False\n asset_options.fix_base_link = True\n asset_options.collapse_fixed_joints = False\n asset_options.disable_gravity = False\n asset_options.thickness = 0.001\n asset_options.angular_damping = 0.01\n asset_options.override_inertia = True\n asset_options.override_com = True\n logger.info(f'VHACD:{self.cfg.env.vhacd}')\n if self.cfg.env.vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n if self.cfg.physics_engine == \"physx\":\n # if self.physics_engine == gymapi.SIM_PHYSX:\n asset_options.use_physx_armature = True\n asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS\n\n if asset_root is None:\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix()\n robot_name = self.cfg.env.robot\n asset_root = pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix()\n dclaw_asset = self.gym.load_asset(self.sim, asset_root, f\"{robot_name}.urdf\", asset_options)\n print(f'Dclaw asset root:{asset_root} robot name:{robot_name}')\n\n self.num_dclaw_bodies = self.gym.get_asset_rigid_body_count(dclaw_asset)\n self.num_dclaw_shapes = self.gym.get_asset_rigid_shape_count(dclaw_asset)\n self.num_dclaw_dofs = self.gym.get_asset_dof_count(dclaw_asset)\n\n print(f'D-Claw:')\n print(f'\\t Number of bodies: {self.num_dclaw_bodies}')\n print(f'\\t Number of shapes: {self.num_dclaw_shapes}')\n print(f'\\t Number of dofs: {self.num_dclaw_dofs}')\n\n self.dclaw_asset_dof_dict = self.gym.get_asset_dof_dict(dclaw_asset)\n joint_names = self.dclaw_asset_dof_dict.keys()\n logger.info(f'Joint names:{joint_names}')\n\n self.dof_joint_indices = list(self.dclaw_asset_dof_dict.values())\n dinds = np.array(self.dof_joint_indices)\n assert np.all(np.diff(dinds) > 0) # check if it's in a sorted order (ascending)\n\n rb_links = self.gym.get_asset_rigid_body_names(dclaw_asset)\n self.fingertips = [x for x in rb_links if 'tip_link' in x] # [\"one_tip_link\", \"two_tip_link\", \"three_tip_link\"]\n self.num_fingertips = len(self.fingertips)\n\n print(f'Number of fingertips:{self.num_fingertips} Fingertips:{self.fingertips}')\n\n print(f'Actuator --- DoF Index')\n for act_name, act_index in zip(joint_names, self.dof_joint_indices):\n print(f'\\t {act_name} {act_index}')\n\n dclaw_dof_props = self.gym.get_asset_dof_properties(dclaw_asset)\n\n def set_dof_prop(props, prop_name, val):\n if np.isscalar(val):\n props[prop_name].fill(val)\n elif len(val) == 3:\n props[prop_name] = np.array(list(val) * int(len(props[prop_name]) / 3))\n else:\n props[prop_name] = np.array(val)\n\n if self.cfg[\"env\"][\"dof_vel_hard_limit\"] is not None:\n vel_hard_limit = self.cfg[\"env\"][\"dof_vel_hard_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_dof_vel_hard_limit\"]\n print(f'Setting DOF velocity limit to:{vel_hard_limit}')\n set_dof_prop(dclaw_dof_props, 'velocity', vel_hard_limit)\n if self.cfg[\"env\"][\"effort_limit\"] is not None:\n effort_limit = self.cfg[\"env\"][\"effort_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_effort_limit\"]\n print(f'Setting DOF effort limit to:{effort_limit}')\n set_dof_prop(dclaw_dof_props, 
'effort', effort_limit)\n if self.cfg[\"env\"][\"stiffness\"] is not None:\n stiffness = self.cfg[\"env\"][\"stiffness\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_stiffness\"]\n print(f'Setting stiffness to:{stiffness}')\n set_dof_prop(dclaw_dof_props, 'stiffness', stiffness)\n if self.cfg[\"env\"][\"damping\"] is not None:\n damping = self.cfg[\"env\"][\"damping\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_damping\"]\n print(f'Setting damping to:{damping}')\n set_dof_prop(dclaw_dof_props, 'damping', damping)\n\n self.dclaw_dof_lower_limits = []\n self.dclaw_dof_upper_limits = []\n\n self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype)\n self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos']\n self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel']\n for i in range(self.num_dclaw_dofs):\n self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i])\n self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i])\n if i % 3 == 1:\n self.dclaw_default_dof_pos[i] = 0.8\n elif i % 3 == 2:\n self.dclaw_default_dof_pos[i] = -1.1\n else:\n self.dclaw_default_dof_pos[i] = 0.\n self.dclaw_default_dof_vel[i] = 0.0\n\n self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device)\n self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device)\n self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device)\n self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device)\n self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device)\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n\n dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset)\n for p in dclaw_asset_props:\n p.friction = self.cfg.env.hand.friction\n p.torsion_friction = self.cfg.env.hand.torsion_friction\n p.rolling_friction = self.cfg.env.hand.rolling_friction\n p.restitution = self.cfg.env.hand.restitution\n self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props)\n return dclaw_asset, dclaw_dof_props\n\n def get_object_start_pose(self, dclaw_start_pose):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n if self.cfg.env.obj_init_delta_pos is not None:\n delta_pos = self.cfg.env.obj_init_delta_pos\n object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0]\n object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1]\n object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2]\n else:\n object_start_pose.p.x = dclaw_start_pose.p.x\n pose_dy, pose_dz = 0., -0.13\n object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy\n object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz\n return object_start_pose\n\n def get_goal_object_start_pose(self, object_start_pose):\n self.goal_displacement = gymapi.Vec3(0., 0, 0.25)\n self.goal_displacement_tensor = to_torch(\n [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)\n goal_start_pose = gymapi.Transform()\n goal_start_pose.p = object_start_pose.p + self.goal_displacement\n return goal_start_pose\n\n def set_dof_props(self, props_dict):\n param_setters_map = get_property_setter_map(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n prop_name = 'dof_properties'\n setter = param_setters_map[prop_name]\n for env_id in range(len(self.envs)):\n env = self.envs[env_id]\n 
handle = self.gym.find_actor_handle(env, 'hand')\n prop = param_getters_map[prop_name](env, handle)\n for dof_prop_name, dof_prop_values in props_dict.items():\n if env_id == 0:\n assert len(dof_prop_values) == len(self.envs)\n prop_val = dof_prop_values[env_id]\n prop[dof_prop_name].fill(prop_val)\n success = setter(env, handle, prop)\n if not success:\n logger.warning(f'Setting dof properties is not successful!')\n\n def update_obj_mass(self, env_ids=None):\n object_rb_masses = []\n env_pool = env_ids if env_ids is not None else list(range(self.num_envs))\n if len(env_pool) < 1:\n return\n for env_id, object_handle in zip(env_pool, self.object_handles):\n env_ptr = self.envs[env_id]\n object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)\n object_rb_masses.append([prop.mass for prop in object_rb_props])\n if self.object_rb_masses is None:\n self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n else:\n self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n self.reset_buf.fill_(1)\n self.reset_goal_buf.fill_(1)\n if self.cfg.env.action_ema is not None:\n self.action_ema_val = zero_actions.clone()\n # step the simulator\n\n self.step(zero_actions)\n\n return self.update_obs()\n\n def compute_reward(self, actions):\n res = compute_dclaw_reward(\n self.reset_buf, self.reset_goal_buf, self.progress_buf,\n self.successes, self.max_episode_length,\n self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,\n self.cfg['env']['rew'], self.actions,\n self.fingertip_pos, self.fingertip_vel, self.object_linvel, self.object_angvel,\n self.dclaw_dof_vel, self.dclaw_dof_torque,\n table_cf=self.table_contact_force if self.cfg.env.rew.pen_tb_contact else None\n )\n self.rew_buf[:] = res[0] * self.cfg.env.rew.rew_scale\n self.done_buf[:] = res[1]\n self.reset_buf[:] = res[2]\n self.reset_goal_buf[:] = res[3]\n self.progress_buf[:] = res[4]\n self.successes[:] = res[5]\n abs_rot_dist = res[6]\n reward_terms = res[7]\n timeout_envs = res[8]\n\n self.extras['success'] = self.reset_goal_buf.detach().to(self.rl_device).flatten()\n self.extras['abs_dist'] = abs_rot_dist.detach().to(self.rl_device)\n self.extras['TimeLimit.truncated'] = timeout_envs.detach().to(self.rl_device)\n for reward_key, reward_val in reward_terms.items():\n self.extras[reward_key] = reward_val.detach()\n\n def get_images(self):\n rgb = self.render_rgb_obs_buf\n return rgb\n\n def compute_observations(self):\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n self.gym.refresh_force_sensor_tensor(self.sim)\n self.gym.refresh_dof_force_tensor(self.sim)\n\n if self.cfg.env.rew.pen_tb_contact:\n self.gym.refresh_net_contact_force_tensor(self.sim)\n\n self.object_pose = self.root_state_tensor[self.object_indices, 0:7]\n self.object_pos = self.root_state_tensor[self.object_indices, 0:3]\n self.object_rot = self.root_state_tensor[self.object_indices, 3:7]\n self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]\n self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]\n\n self.goal_pose = self.goal_states[:, 0:7]\n self.goal_pos = 
self.goal_states[:, 0:3]\n self.goal_rot = self.goal_states[:, 3:7]\n\n self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]\n self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]\n self.fingertip_vel = self.rigid_body_states[:, self.fingertip_handles][:, :, 7:13]\n\n if self.obs_type == \"full_no_vel\":\n obs_buf = self.compute_full_observations(no_vel=True)\n elif self.obs_type == \"full\":\n obs_buf = self.compute_full_observations()\n elif self.obs_type == \"full_state\":\n obs_buf = self.compute_full_state()\n else:\n print(\"Unknown observations type!\")\n self.obs_buf = obs_buf\n\n if self.cfg.rgb_render:\n self.gym.fetch_results(self.sim, True)\n self.gym.step_graphics(self.sim)\n self.gym.render_all_camera_sensors(self.sim)\n self.gym.start_access_image_tensors(self.sim)\n self.render_rgb_obs_buf = self.get_numpy_rgb_images(self.render_camera_handles)\n self.gym.end_access_image_tensors(self.sim)\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n\n def compute_full_observations(self, no_vel=False):\n scaled_dof_pos = unscale(\n self.dclaw_dof_pos,\n self.dclaw_dof_lower_limits,\n self.dclaw_dof_upper_limits\n )\n quat_dist = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))\n\n if no_vel:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.object_pose,\n self.goal_rot,\n quat_dist,\n self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n else:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.vel_obs_scale * self.dclaw_dof_vel,\n self.object_pose,\n self.object_linvel,\n self.vel_obs_scale * self.object_angvel,\n self.goal_rot,\n quat_dist,\n self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n return out\n\n def compute_full_state(self):\n obs_buf = self.compute_full_observations()\n obs_no_actions = obs_buf[:, :-9]\n actions = obs_buf[:, -9:]\n out = torch.cat(\n [\n obs_no_actions,\n self.force_torque_obs_scale * self.dof_force_tensor,\n self.force_torque_obs_scale * self.vec_sensor_tensor,\n actions\n ],\n dim=-1\n )\n\n return out\n\n def update_obs(self):\n if self.randomize:\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def reset_target_pose(self, env_ids, apply_reset=False):\n new_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]\n self.goal_states[env_ids, 3:7] = new_rot\n self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor\n self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])\n\n if apply_reset:\n goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))\n self.reset_goal_buf[env_ids] = 0\n\n def reset_idx(self, env_ids, goal_env_ids):\n if
self.randomize and not self.cfg.env.rand_once:\n self.apply_randomizations(self.randomization_params)\n\n rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_dclaw_dofs * 2 + 3), device=self.device)\n\n self.reset_target_pose(env_ids)\n self.rb_forces[env_ids, :, :] = 0.0\n\n self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()\n self.root_state_tensor[self.object_indices[env_ids], 0:3] = self.object_init_state[env_ids, 0:3] + \\\n self.reset_position_noise * rand_floats[:, 0:3]\n\n new_object_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot\n self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.object_indices[env_ids], 7:13])\n\n object_indices = torch.unique(torch.cat([self.object_indices[env_ids],\n self.goal_object_indices[env_ids],\n self.goal_object_indices[goal_env_ids]]).to(torch.int32))\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(object_indices), len(object_indices))\n self.random_force_prob[env_ids] = torch.exp(\n (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))\n\n delta_max = self.dclaw_dof_upper_limits - self.dclaw_default_dof_pos\n delta_min = self.dclaw_dof_lower_limits - self.dclaw_default_dof_pos\n rand_delta = delta_min + (delta_max - delta_min) * rand_floats[:, 3:3 + self.num_dclaw_dofs]\n\n pos = self.dclaw_default_dof_pos + self.reset_dof_pos_noise * rand_delta\n self.dclaw_dof_pos[env_ids, :] = pos\n self.dclaw_dof_vel[env_ids, :] = self.dclaw_default_dof_vel + \\\n self.reset_dof_vel_noise * rand_floats[:,\n 3 + self.num_dclaw_dofs:3 + self.num_dclaw_dofs * 2]\n self.prev_targets[env_ids, :self.num_dclaw_dofs] = pos\n self.cur_targets[env_ids, :self.num_dclaw_dofs] = pos\n\n hand_indices = self.hand_indices[env_ids].to(torch.int32)\n self.gym.set_dof_position_target_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.prev_targets),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n self.gym.set_dof_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.dof_state),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n\n self.progress_buf[env_ids] = 0\n self.reset_buf[env_ids] = 0\n self.successes[env_ids] = 0\n\n def get_numpy_rgb_images(self, camera_handles):\n rgb_obs_buf = []\n for cam_handles, env in zip(camera_handles, self.envs):\n cam_ob = []\n if isinstance(cam_handles, list):\n for cam_handle in cam_handles:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handle, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n cam_ob.append(color_image)\n rgb_obs_buf.append(cam_ob)\n else:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handles, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n rgb_obs_buf.append(color_image)\n rgb_obs_buf = np.stack(rgb_obs_buf)\n return rgb_obs_buf\n\n def pre_physics_step(self, actions):\n env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)\n\n if len(goal_env_ids) > 0 and len(env_ids) == 0:\n self.reset_target_pose(goal_env_ids, apply_reset=True)\n elif len(goal_env_ids) > 0:\n 
self.reset_target_pose(goal_env_ids)\n\n if len(env_ids) > 0:\n self.reset_idx(env_ids, goal_env_ids)\n\n self.actions = actions.clone().to(self.device)\n\n if self.cfg.env.action_ema is not None:\n self.action_ema_val[env_ids] = 0\n self.action_ema_val[goal_env_ids] = 0\n self.actions = self.actions * self.cfg.env.action_ema + self.action_ema_val * (1 - self.cfg.env.action_ema)\n self.action_ema_val = self.actions.clone()\n if self.cfg.env.dof_vel_pol_limit is not None:\n delta_action = self.actions * self.cfg.env.dof_vel_pol_limit * (self.dt * self.cfg.env.controlFrequencyInv)\n else:\n delta_action = self.dclaw_dof_speed_scale * self.dt * self.actions\n if self.cfg.env.relativeToPrevTarget:\n targets = self.prev_targets[:, self.dof_joint_indices] + delta_action\n else:\n targets = self.dclaw_dof_pos + delta_action\n\n self.cur_targets[:, self.dof_joint_indices] = tensor_clamp(targets,\n self.dclaw_dof_lower_limits[\n self.dof_joint_indices],\n self.dclaw_dof_upper_limits[\n self.dof_joint_indices])\n\n self.prev_targets[:, self.dof_joint_indices] = self.cur_targets[:, self.dof_joint_indices]\n self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))\n\n if self.force_scale > 0.0:\n self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)\n # apply new forces\n force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()\n rb_force_shape = self.rb_forces[force_indices, self.object_rb_handles, :].shape\n rb_force_dir = torch.randn(rb_force_shape, device=self.device)\n rb_force_dir = rb_force_dir / rb_force_dir.norm(dim=-1, keepdim=True)\n self.rb_forces[force_indices, self.object_rb_handles, :] = rb_force_dir * self.object_rb_masses[force_indices] * self.force_scale\n self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None,\n gymapi.LOCAL_SPACE)\n\n def post_physics_step(self):\n self.progress_buf += 1\n self.randomize_buf += 1\n\n self.compute_observations()\n self.compute_reward(self.actions)\n\n if self.viewer and self.debug_viz:\n # draw axes on target object\n self.gym.clear_lines(self.viewer)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n for i in range(self.num_envs):\n targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])\n\n objectx = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n objecty = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n objectz = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.object_pos[i].cpu().numpy()\n 
self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])" }, { "identifier": "chunker_list", "path": "dexenv/utils/common.py", "snippet": "def chunker_list(seq_list, nchunks):\n # split the list into n parts/chunks\n return [seq_list[i::nchunks] for i in range(nchunks)]" }, { "identifier": "get_all_files_with_name", "path": "dexenv/utils/common.py", "snippet": "def get_all_files_with_name(directory, name,\n exclude_patterns=None,\n include_patterns=None,\n sort=True,\n ):\n directory = pathlib_file(directory)\n files = directory.glob(f'**/{name}')\n files = [x for x in files if x.is_file() and x.name == name]\n if exclude_patterns is not None:\n files = filter_with_exclude_patterns(files, exclude_patterns)\n if include_patterns is not None:\n files = filter_with_include_patterns(files, include_patterns)\n if sort:\n files = sorted(files)\n return files" }, { "identifier": "load_from_pickle", "path": "dexenv/utils/common.py", "snippet": "def load_from_pickle(file_name):\n file_name = pathlib_file(file_name)\n with file_name.open('rb') as f:\n data = pkl.load(f)\n return data" }, { "identifier": "load_a_goal_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_a_goal_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n asset_options.override_inertia = True\n # asset_options.override_com = True\n\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_an_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_an_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.thickness = 0.001\n asset_options.override_inertia = True\n # asset_options.override_com = True\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_obj_texture", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_obj_texture(gym, sim, object_urdf):\n texture_files = get_all_files_with_suffix(object_urdf.parent, 'png')\n num_textures = len(texture_files)\n if num_textures > 1:\n logger.warning(f'Multiple image files exist, will use the first image as the texture!')\n elif num_textures == 0:\n raise RuntimeError(f'No texture file is found!')\n texture_file = texture_files[0]\n texture_handle = gym.create_texture_from_file(sim,\n texture_file.as_posix(),\n )\n return texture_handle" } ]
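A note on the `chunker_list` helper included in this record's context: it splits by striding rather than by contiguous slices, so element `i` lands in chunk `i % nchunks` and chunk sizes differ by at most one. A minimal standalone sketch mirroring the one-liner shown above:

def chunker_list(seq_list, nchunks):
    # Stride-based split: element i goes to chunk i % nchunks,
    # so the chunk sizes differ by at most one.
    return [seq_list[i::nchunks] for i in range(nchunks)]

print(chunker_list(list(range(7)), 3))
# [[0, 3, 6], [1, 4], [2, 5]]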
import numpy as np import torch import dexenv from gym.utils import seeding from isaacgym import gymapi from loguru import logger from tqdm import tqdm from dexenv.envs.dclaw_base import DClawBase from dexenv.utils.common import chunker_list from dexenv.utils.common import get_all_files_with_name from dexenv.utils.common import load_from_pickle from dexenv.utils.isaac_utils import load_a_goal_object_asset from dexenv.utils.isaac_utils import load_an_object_asset from dexenv.utils.isaac_utils import load_obj_texture
14864
self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if 
self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}')
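The tail of `parse_obj_dataset` shown above resolves a dataset name of the form `name` or `name:object` to a directory under the asset root. A small standalone sketch of that mapping (the dataset and object names here are hypothetical, for illustration only):

from pathlib import Path

def resolve_dataset_path(asset_root: Path, dataset: str) -> Path:
    # Mirrors the logic above: 'name' -> <asset_root>/name/train,
    # 'name:obj' -> <asset_root>/name/train/obj.
    split_dataset_name = dataset.split(':')
    if len(split_dataset_name) == 1:
        return asset_root.joinpath(dataset, 'train')
    return asset_root.joinpath(split_dataset_name[0], 'train', split_dataset_name[1])

root = Path('assets')
print(resolve_dataset_path(root, 'airplane'))          # assets/airplane/train
print(resolve_dataset_path(root, 'airplane:model_0'))  # assets/airplane/train/model_0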
class DclawMultiObjs(DClawBase): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.set_random_gen() self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset) self.num_objects = len(self.object_urdfs) logger.info(f'Object urdf root path:{self.dataset_path}.') logger.info(f'Number of available objects:{self.num_objects}.') super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) def set_random_gen(self, seed=12345): self.np_random, seed = seeding.np_random(seed) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) # load manipulated object and goal assets table_asset = self.get_table_asset() table_pose = self.get_table_pose() object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() # create fingertip force sensors, if needed if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) 
self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}')
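Note how `_create_envs` above assigns object assets to environments round-robin (`obj_asset_id = i % num_object_assets`), so with more environments than assets each asset appears a near-equal number of times. A standalone sketch of the resulting distribution, using hypothetical asset ids:

from collections import Counter

num_envs = 10
object_ids = ['obj_a', 'obj_b', 'obj_c']  # hypothetical asset ids

# Same round-robin rule as in _create_envs above.
env_obj_ids = [object_ids[i % len(object_ids)] for i in range(num_envs)]
print(env_obj_ids)
# ['obj_a', 'obj_b', 'obj_c', 'obj_a', 'obj_b', 'obj_c', 'obj_a', 'obj_b', 'obj_c', 'obj_a']
print(Counter(env_obj_ids))  # Counter({'obj_a': 4, 'obj_b': 3, 'obj_c': 3})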
urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')
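The `next_line` above calls `get_all_files_with_name` from `dexenv/utils/common.py`, which is a recursive, sorted filename glob. A minimal sketch of the equivalent behaviour, ignoring the optional include/exclude pattern filtering:

from pathlib import Path

def find_files_named(directory, name):
    # Recursive glob for files with an exact name, returned sorted,
    # matching the get_all_files_with_name call in next_line.
    directory = Path(directory)
    files = [p for p in directory.glob(f'**/{name}') if p.is_file() and p.name == name]
    return sorted(files)

# e.g. find_files_named('assets/airplane/train', 'model.urdf')  # hypothetical path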
2
2023-10-25 17:22:41+00:00
24k
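Taken together, the fields of this record say that the completion target (`next_line`) exercises the context snippet at `gold_snippet_index`. A hypothetical consistency check over such a record (the field names follow this file's schema; the check itself is not part of the dataset):

def gold_snippet_is_used(record: dict) -> bool:
    # The gold snippet's identifier should appear verbatim in next_line.
    gold = record['context'][record['gold_snippet_index']]
    return gold['identifier'] in record['next_line']

record = {
    'context': [{'identifier': 'DClawBase'},
                {'identifier': 'chunker_list'},
                {'identifier': 'get_all_files_with_name'}],
    'gold_snippet_index': 2,
    'next_line': "urdf_files = get_all_files_with_name(dataset_path, name='model.urdf')",
}
print(gold_snippet_is_used(record))  # True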
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/activation_resampler/tests/test_activation_resampler.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then provides the parameters necessary to reset any dead neurons.\n\n Motivation:\n Over the course of training, a subset of autoencoder neurons will have zero activity across\n a large number of datapoints. The authors of *Towards Monosemanticity: Decomposing Language\n Models With Dictionary Learning* found that “resampling” these dead neurons during training\n improves the number of likely-interpretable features (i.e., those in the high density\n cluster) and reduces total loss. This resampling may be compatible with the Lottery Ticket\n Hypothesis and increase the number of chances the network has to find promising feature\n directions.\n\n An interesting nuance around dead neurons involves the ultralow density cluster. They found\n that if we increase the number of training steps then networks will kill off more of these\n ultralow density neurons. This reinforces the use of the high density cluster as a useful\n metric because there can exist neurons that are de facto dead but will not appear to be when\n looking at the number of dead neurons alone.\n\n This approach is designed to seed new features to fit inputs where the current autoencoder\n performs worst. Resetting the encoder norm and bias are crucial to ensuring this resampled\n neuron will only fire weakly for inputs similar to the one used for its reinitialization.\n This was done to minimize interference with the rest of the network.\n\n Warning:\n The optimizer should be reset after applying this function, as the Adam state will be\n incorrect for the modified weights and biases.\n\n Warning:\n This approach is also known to create sudden loss spikes, and resampling too frequently\n causes training to diverge.\n \"\"\"\n\n _activations_seen_since_last_resample: int = 0\n \"\"\"Number of activations since we last resampled.\"\"\"\n\n _collated_neuron_activity: Float[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Collated neuron activity, over the current data collection window.\"\"\"\n\n _threshold_is_dead_portion_fires: float\n \"\"\"Threshold for determining if a neuron has fired (or is dead).\"\"\"\n\n _max_n_resamples: int\n \"\"\"Maximum number of times that resampling should be performed.\"\"\"\n\n _n_activations_collated_since_last_resample: int = 0\n \"\"\"Number of activations collated since we last resampled.\n\n Number of vectors used to collate neuron activity, over the current collation window.\n \"\"\"\n\n _n_components: int\n \"\"\"Number of components.\"\"\"\n\n _n_times_resampled: int = 0\n \"\"\"Number of times that resampling has been performed.\"\"\"\n\n neuron_activity_window_end: int\n \"\"\"End of the window for collecting neuron activity.\"\"\"\n\n neuron_activity_window_start: int\n \"\"\"Start of the window for collecting neuron activity.\"\"\"\n\n @validate_call\n def __init__(\n self,\n n_learned_features: PositiveInt,\n n_components: NonNegativeInt = 1,\n resample_interval: PositiveInt = 200_000_000,\n max_n_resamples: NonNegativeInt = 4,\n n_activations_activity_collate: PositiveInt = 100_000_000,\n resample_dataset_size: PositiveInt = 819_200,\n threshold_is_dead_portion_fires: Annotated[float, Field(strict=True, ge=0, le=1)] = 0.0,\n ) -> None:\n r\"\"\"Initialize the activation 
resampler.\n\n Defaults to values used in the Anthropic Towards Monosemanticity paper.\n\n Args:\n n_learned_features: Number of learned features\n n_components: Number of components that the SAE is being trained on.\n resample_interval: Interval in number of autoencoder input activation vectors trained\n on, before resampling.\n max_n_resamples: Maximum number of resamples to perform throughout the entire pipeline.\n Set to inf if you want to have no limit.\n n_activations_activity_collate: Number of autoencoder learned activation vectors to\n collate before resampling (the activation resampler will start collecting on vector\n $\\text{resample_interval} - \\text{n_steps_collate}$).\n resample_dataset_size: Number of autoencoder input activations to use for calculating\n the loss, as part of the resampling process to create the reset neuron weights.\n threshold_is_dead_portion_fires: Threshold for determining if a neuron is dead (has\n \"fired\" in less than this portion of the collated sample).\n\n Raises:\n ValueError: If any of the arguments are invalid (e.g. negative integers).\n \"\"\"\n if n_activations_activity_collate > resample_interval:\n error_message = (\n \"Number of steps to collate must be less than or equal to the resample interval.\"\n )\n raise ValueError(error_message)\n\n super().__init__()\n self.neuron_activity_window_end = resample_interval\n self.neuron_activity_window_start = resample_interval - n_activations_activity_collate\n self._max_n_resamples = max_n_resamples\n self._collated_neuron_activity = torch.zeros(\n (n_components, n_learned_features), dtype=torch.int64\n )\n self._resample_dataset_size = resample_dataset_size\n self._threshold_is_dead_portion_fires = threshold_is_dead_portion_fires\n self._n_components = n_components\n\n def _get_dead_neuron_indices(\n self,\n ) -> list[Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]]:\n \"\"\"Identify the indices of neurons that are dead.\n\n Identifies any neurons that have fired less than the threshold portion of the collated\n sample size.\n\n Example:\n >>> resampler = ActivationResampler(n_learned_features=6, n_components=2)\n >>> resampler._collated_neuron_activity = torch.tensor(\n ... [[1, 1, 0, 0, 1, 1], [1, 1, 1, 1, 1, 0]]\n ... 
)\n >>> resampler._get_dead_neuron_indices()\n [tensor([2, 3]), tensor([5])]\n\n Returns:\n List of dead neuron indices for each component.\n\n Raises:\n ValueError: If no neuron activity has been collated yet.\n \"\"\"\n # Check we have already collated some neuron activity\n if torch.all(self._collated_neuron_activity == 0):\n error_message = \"Cannot get dead neuron indices without neuron activity.\"\n raise ValueError(error_message)\n\n # Find any neurons that fire less than the threshold portion of times\n threshold_is_dead_n_fires: int = int(\n self._n_activations_collated_since_last_resample * self._threshold_is_dead_portion_fires\n )\n\n return [\n torch.where(self._collated_neuron_activity[component_idx] <= threshold_is_dead_n_fires)[\n 0\n ].to(dtype=torch.int64)\n for component_idx in range(self._n_components)\n ]\n\n def compute_loss_and_get_activations(\n self,\n store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> LossInputActivationsTuple:\n \"\"\"Compute the loss on a random subset of inputs.\n\n Motivation:\n Helps find input vectors that have high SAE loss, so that we can resample dead neurons\n in a way that improves performance on these specific input vectors.\n\n Args:\n store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n A tuple of loss per item, and all input activations.\n\n Raises:\n ValueError: If the number of items in the store is less than the number of inputs\n \"\"\"\n with torch.no_grad():\n loss_batches: list[Float[Tensor, Axis.BATCH]] = []\n input_activations_batches: list[\n Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n dataloader = DataLoader(store, batch_size=train_batch_size)\n n_inputs = self._resample_dataset_size\n n_batches_required: int = n_inputs // train_batch_size\n model_device: torch.device = get_model_device(autoencoder)\n\n for batch_idx, batch in enumerate(iter(dataloader)):\n input_activations_batches.append(batch)\n source_activations = batch.to(model_device)\n learned_activations, reconstructed_activations = autoencoder(source_activations)\n loss_batches.append(\n loss_fn.forward(\n source_activations, learned_activations, reconstructed_activations\n )\n )\n if batch_idx >= n_batches_required:\n break\n\n loss_per_item = torch.cat(loss_batches).to(model_device)\n input_activations = torch.cat(input_activations_batches).to(model_device)\n\n # Check we generated enough data\n if len(loss_per_item) < n_inputs:\n error_message = (\n f\"Cannot get {n_inputs} items from the store, \"\n f\"as only {len(loss_per_item)} were available.\"\n )\n raise ValueError(error_message)\n\n return LossInputActivationsTuple(loss_per_item, input_activations)\n\n @staticmethod\n def assign_sampling_probabilities(\n loss: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Assign the sampling probabilities for each input activations vector.\n\n Assign each input vector a probability of being picked that is proportional to the square of\n the autoencoder's loss on that input.\n\n Examples:\n >>> loss = torch.tensor([1.0, 2.0, 3.0])\n >>> ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([0.0700, 0.2900, 0.6400])\n\n >>> loss = torch.tensor([[1.0, 2], [2, 4], [3, 6]])\n >>> 
ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([[0.0700, 0.0700],\n [0.2900, 0.2900],\n [0.6400, 0.6400]])\n\n Args:\n loss: Loss per item.\n\n Returns:\n A tensor of probabilities for each item.\n \"\"\"\n square_loss = loss.pow(2)\n return square_loss / square_loss.sum(0)\n\n @staticmethod\n def sample_input(\n probabilities: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n n_samples: list[int],\n ) -> list[Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]]:\n \"\"\"Sample an input vector based on the provided probabilities.\n\n Example:\n >>> probabilities = torch.tensor([[0.1], [0.2], [0.7]])\n >>> input_activations = torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]])\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = ActivationResampler.sample_input(\n ... probabilities, input_activations, [2]\n ... )\n >>> sampled_input[0].tolist()\n [[5.0, 6.0], [3.0, 4.0]]\n\n Args:\n probabilities: Probabilities for each input.\n input_activations: Input activation vectors.\n n_samples: Number of samples to take (number of dead neurons).\n\n Returns:\n Sampled input activation vector.\n\n Raises:\n ValueError: If the number of samples is greater than the number of input activations.\n \"\"\"\n sampled_inputs: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n\n for component_idx, component_n_samples in enumerate(n_samples):\n component_probabilities: Float[Tensor, Axis.BATCH] = get_component_slice_tensor(\n input_tensor=probabilities,\n n_dim_with_component=2,\n component_dim=1,\n component_idx=component_idx,\n )\n\n component_input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(\n input_tensor=input_activations,\n n_dim_with_component=3,\n component_dim=1,\n component_idx=component_idx,\n )\n\n if component_n_samples > len(component_input_activations):\n exception_message = (\n f\"Cannot sample {component_n_samples} inputs from \"\n f\"{len(component_input_activations)} input activations.\"\n )\n raise ValueError(exception_message)\n\n # Handle the 0 dead neurons case\n if component_n_samples == 0:\n sampled_inputs.append(\n torch.empty(\n (0, component_input_activations.shape[-1]),\n dtype=component_input_activations.dtype,\n device=component_input_activations.device,\n )\n )\n continue\n\n # Handle the 1+ dead neuron case\n component_sample_indices: Int64[Tensor, Axis.LEARNT_FEATURE_IDX] = torch.multinomial(\n component_probabilities, num_samples=component_n_samples\n )\n sampled_inputs.append(component_input_activations[component_sample_indices, :])\n\n return sampled_inputs\n\n @staticmethod\n def renormalize_and_scale(\n sampled_input: Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n neuron_activity: Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE)],\n encoder_weight: Float[Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n ) -> Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Renormalize and scale the resampled dictionary vectors.\n\n Renormalize the input vector to equal the average norm of the encoder weights for alive\n neurons times 0.2.\n\n Example:\n >>> from torch.nn import Parameter\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = 
torch.tensor([[3.0, 4.0]])\n >>> neuron_activity = torch.tensor([3, 0, 5, 0, 1, 3])\n >>> encoder_weight = Parameter(torch.ones((6, 2)))\n >>> rescaled_input = ActivationResampler.renormalize_and_scale(\n ... sampled_input,\n ... neuron_activity,\n ... encoder_weight\n ... )\n >>> rescaled_input.round(decimals=1)\n tensor([[0.2000, 0.2000]])\n\n Args:\n sampled_input: Tensor of the sampled input activation.\n neuron_activity: Tensor representing the number of times each neuron fired.\n encoder_weight: Tensor of encoder weights.\n\n Returns:\n Rescaled sampled input.\n\n Raises:\n ValueError: If there are no alive neurons.\n \"\"\"\n alive_neuron_mask: Bool[Tensor, \" learned_features\"] = neuron_activity > 0\n\n # Check there is at least one alive neuron\n if not torch.any(alive_neuron_mask):\n error_message = \"No alive neurons found.\"\n raise ValueError(error_message)\n\n # Handle no dead neurons\n n_dead_neurons = len(sampled_input)\n if n_dead_neurons == 0:\n return torch.empty(\n (0, sampled_input.shape[-1]), dtype=sampled_input.dtype, device=sampled_input.device\n )\n\n # Calculate the average norm of the encoder weights for alive neurons.\n detached_encoder_weight = encoder_weight.detach() # Don't track gradients\n alive_encoder_weights: Float[\n Tensor, Axis.names(Axis.ALIVE_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = detached_encoder_weight[alive_neuron_mask, :]\n average_alive_norm: Float[Tensor, Axis.SINGLE_ITEM] = alive_encoder_weights.norm(\n dim=-1\n ).mean()\n\n # Renormalize the input vector to equal the average norm of the encoder weights for alive\n # neurons times 0.2.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input, dim=-1)\n return renormalized_input * (average_alive_norm * 0.2)\n\n def resample_dead_neurons(\n self,\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults]:\n \"\"\"Resample dead neurons.\n\n Args:\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n For each component that the SAE is being trained on, the indices of dead neurons and the\n updates for the encoder and decoder weights and biases.\n \"\"\"\n parameter_update_results: list[ParameterUpdateResults] = []\n\n with torch.no_grad():\n dead_neuron_indices: list[\n Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]\n ] = self._get_dead_neuron_indices()\n\n # Compute the loss for the current model on a random subset of inputs and get the\n # activations.\n loss_per_item, input_activations = self.compute_loss_and_get_activations(\n store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Assign each input vector a probability of being picked that is proportional to the\n # square of the autoencoder's loss on that input.\n sample_probabilities: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = self.assign_sampling_probabilities(loss_per_item)\n\n # For each dead neuron sample an input according to these probabilities.\n sampled_input: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = self.sample_input(\n sample_probabilities, input_activations, [len(dead) for dead in dead_neuron_indices]\n )\n\n for component_idx 
in range(self._n_components):\n # Renormalize each input vector to have unit L2 norm and set this to be the\n # dictionary vector for the dead autoencoder neuron.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input[component_idx], dim=-1)\n\n dead_decoder_weight_updates = rearrange(\n renormalized_input, \"dead_neuron input_feature -> input_feature dead_neuron\"\n )\n\n # For the corresponding encoder vector, renormalize the input vector to equal the\n # average norm of the encoder weights for alive neurons times 0.2. Set the\n # corresponding encoder bias element to zero.\n encoder_weight: Float[\n Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(autoencoder.encoder.weight, 3, 0, component_idx)\n\n rescaled_sampled_input = self.renormalize_and_scale(\n sampled_input=sampled_input[component_idx],\n neuron_activity=self._collated_neuron_activity[component_idx],\n encoder_weight=encoder_weight,\n )\n\n dead_encoder_bias_updates = torch.zeros_like(\n dead_neuron_indices[component_idx],\n dtype=dead_decoder_weight_updates.dtype,\n device=dead_decoder_weight_updates.device,\n )\n\n parameter_update_results.append(\n ParameterUpdateResults(\n dead_neuron_indices=dead_neuron_indices[component_idx],\n dead_encoder_weight_updates=rescaled_sampled_input,\n dead_encoder_bias_updates=dead_encoder_bias_updates,\n dead_decoder_weight_updates=dead_decoder_weight_updates,\n )\n )\n\n return parameter_update_results\n\n def step_resampler(\n self,\n batch_neuron_activity: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)],\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults] | None:\n \"\"\"Step the resampler, collating neuron activity and resampling if necessary.\n\n Args:\n batch_neuron_activity: Number of times each neuron fired in the current batch.\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n Parameter update results (for each component that the SAE is being trained on) if\n resampling is due. Otherwise None.\n \"\"\"\n # Update the counter\n self._activations_seen_since_last_resample += len(activation_store)\n\n if self._n_times_resampled < self._max_n_resamples:\n # Collate neuron activity, if in the data collection window. 
For example in the\n # Anthropic Towards Monosemanticity paper, the window started collecting at 100m\n # activations and stopped at 200m (and then repeated this again a few times until the\n # max times to resample was hit).\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_start:\n detached_neuron_activity = batch_neuron_activity.detach().cpu()\n self._collated_neuron_activity.add_(detached_neuron_activity)\n self._n_activations_collated_since_last_resample += train_batch_size\n\n # Check if we should resample.\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_end:\n # Get resampled dictionary vectors\n resample_res = self.resample_dead_neurons(\n activation_store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Update counters\n self._activations_seen_since_last_resample = 0\n self._n_activations_collated_since_last_resample = 0\n self._n_times_resampled += 1\n\n # Reset the collated neuron activity\n self._collated_neuron_activity.zero_()\n\n return resample_res\n\n return None\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the activation resampler.\"\"\"\n return (\n f\"ActivationResampler(\"\n f\"n_components={self._n_components}, \"\n f\"neuron_activity_window_start={self.neuron_activity_window_start}, \"\n f\"neuron_activity_window_end={self.neuron_activity_window_end}, \"\n f\"max_resamples={self._max_n_resamples}, \"\n f\"resample_dataset_size={self._resample_dataset_size}, \"\n f\"dead_neuron_threshold={self._threshold_is_dead_portion_fires})\"\n )" }, { "identifier": "ActivationStore", "path": "sparse_autoencoder/activation_store/base_store.py", "snippet": "class ActivationStore(\n Dataset[Float[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]], ABC\n):\n \"\"\"Activation Store Abstract Class.\n\n Extends the `torch.utils.data.Dataset` class to provide an activation store, with additional\n :meth:`append` and :meth:`extend` methods (the latter of which should typically be\n non-blocking). The resulting activation store can be used with a `torch.utils.data.DataLoader`\n to iterate over the dataset.\n\n Extend this class if you want to create a new activation store (noting you also need to create\n `__getitem__` and `__len__` methods from the underlying `torch.utils.data.Dataset` class).\n\n Example:\n >>> import torch\n >>> class MyActivationStore(ActivationStore):\n ...\n ... @property\n ... def current_activations_stored_per_component(self):\n ... raise NotImplementedError\n ...\n ... @property\n ... def n_components(self):\n ... raise NotImplementedError\n ...\n ... def __init__(self):\n ... super().__init__()\n ... self._data = [] # In this example, we just store in a list\n ...\n ... def append(self, item) -> None:\n ... self._data.append(item)\n ...\n ... def extend(self, batch):\n ... self._data.extend(batch)\n ...\n ... def empty(self):\n ... self._data = []\n ...\n ... def __getitem__(self, index: int):\n ... return self._data[index]\n ...\n ... def __len__(self) -> int:\n ... 
return len(self._data)\n    ...\n    >>> store = MyActivationStore()\n    >>> store.append(torch.randn(100))\n    >>> print(len(store))\n    1\n    \"\"\"\n\n    @abstractmethod\n    def append(\n        self,\n        item: Float[Tensor, Axis.names(Axis.INPUT_OUTPUT_FEATURE)],\n        component_idx: int,\n    ) -> Future | None:\n        \"\"\"Add a Single Item to the Store.\"\"\"\n\n    @abstractmethod\n    def extend(\n        self,\n        batch: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)],\n        component_idx: int,\n    ) -> Future | None:\n        \"\"\"Add a Batch to the Store.\"\"\"\n\n    @abstractmethod\n    def empty(self) -> None:\n        \"\"\"Empty the Store.\"\"\"\n\n    @property\n    @abstractmethod\n    def n_components(self) -> int:\n        \"\"\"Number of components.\"\"\"\n\n    @property\n    @abstractmethod\n    def current_activations_stored_per_component(self) -> list[int]:\n        \"\"\"Current activations stored per component.\"\"\"\n\n    @abstractmethod\n    def __len__(self) -> int:\n        \"\"\"Get the Length of the Store.\"\"\"\n\n    @abstractmethod\n    def __getitem__(\n        self, index: tuple[int, ...] | slice | int\n    ) -> Float[Tensor, Axis.names(Axis.ANY)]:\n        \"\"\"Get an Item from the Store.\"\"\"\n\n    def shuffle(self) -> None:\n        \"\"\"Optional shuffle method.\"\"\"\n\n    @final\n    @validate_call\n    def fill_with_test_data(\n        self,\n        n_batches: PositiveInt = 1,\n        batch_size: PositiveInt = 16,\n        n_components: PositiveInt = 1,\n        input_features: PositiveInt = 256,\n    ) -> None:\n        \"\"\"Fill the store with test data.\n\n        For use when testing your code, to ensure it works with a real activation store.\n\n        Warning:\n        You may want to use `torch.manual_seed(0)` to make the random data deterministic, if your test\n        requires inspecting the data itself.\n\n        Example:\n        >>> from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore\n        >>> store = TensorActivationStore(max_items=100, n_neurons=256, n_components=1)\n        >>> store.fill_with_test_data(batch_size=100)\n        >>> len(store)\n        100\n\n        Args:\n            n_batches: Number of batches to fill the store with.\n            batch_size: Number of items per batch.\n            n_components: Number of source model components the SAE is trained on.\n            input_features: Number of input features per item.\n        \"\"\"\n        for _ in range(n_batches):\n            for component_idx in range(n_components):\n                sample = torch.rand(batch_size, input_features)\n                self.extend(sample, component_idx)" }, { "identifier": "TensorActivationStore", "path": "sparse_autoencoder/activation_store/tensor_store.py", "snippet": "class TensorActivationStore(ActivationStore):\n    \"\"\"Tensor Activation Store.\n\n    Stores tensors in a (large) tensor of shape (item, component, neuron). Requires the number of activation\n    vectors to be stored to be known in advance. 
Multiprocess safe.\n\n Extends the `torch.utils.data.Dataset` class to provide a list-based activation store, with\n additional :meth:`append` and :meth:`extend` methods (the latter of which is non-blocking).\n\n Examples:\n Create an empty activation dataset:\n\n >>> import torch\n >>> store = TensorActivationStore(max_items=1000, n_neurons=100, n_components=2)\n\n Add a single activation vector to the dataset (for a component):\n\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=1)\n >>> len(store)\n 1\n\n Add a [batch, neurons] activation tensor to the dataset:\n\n >>> store.empty()\n >>> batch = torch.randn(10, 100)\n >>> store.extend(batch, component_idx=0)\n >>> store.extend(batch, component_idx=1)\n >>> len(store)\n 10\n\n Shuffle the dataset **before passing it to the DataLoader**:\n\n >>> store.shuffle() # Faster than using the DataLoader shuffle argument\n\n Use the dataloader to iterate over the dataset:\n\n >>> loader = torch.utils.data.DataLoader(store, shuffle=False, batch_size=2)\n >>> next_item = next(iter(loader))\n >>> next_item.shape\n torch.Size([2, 2, 100])\n \"\"\"\n\n _data: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)]\n \"\"\"Underlying Tensor Data Store.\"\"\"\n\n _items_stored: list[int]\n \"\"\"Number of items stored.\"\"\"\n\n max_items: int\n \"\"\"Maximum Number of Items to Store.\"\"\"\n\n _n_components: int\n \"\"\"Number of components\"\"\"\n\n @property\n def n_components(self) -> int:\n \"\"\"Number of components.\"\"\"\n return self._n_components\n\n @property\n def current_activations_stored_per_component(self) -> list[int]:\n \"\"\"Number of activations stored per component.\"\"\"\n return self._items_stored\n\n @validate_call(config={\"arbitrary_types_allowed\": True})\n def __init__(\n self,\n max_items: PositiveInt,\n n_neurons: PositiveInt,\n n_components: PositiveInt,\n device: torch.device | None = None,\n ) -> None:\n \"\"\"Initialise the Tensor Activation Store.\n\n Args:\n max_items: Maximum number of items to store per component (individual activation\n vectors).\n n_neurons: Number of neurons in each activation vector.\n n_components: Number of components to store (i.e. number of source models).\n device: Device to store the activation vectors on.\n \"\"\"\n self._n_components = n_components\n self._items_stored = [0] * n_components\n self._max_items = max_items\n self._data = torch.empty((max_items, n_components, n_neurons), device=device)\n\n def __len__(self) -> int:\n \"\"\"Length Dunder Method.\n\n Returns the number of activation vectors per component in the dataset.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10_000_000, n_neurons=100, n_components=1)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> len(store)\n 2\n\n Returns:\n The number of activation vectors in the dataset.\n \"\"\"\n # Min as this is the amount of activations that can be fetched by get_item\n return min(self.current_activations_stored_per_component)\n\n def __sizeof__(self) -> int:\n \"\"\"Sizeof Dunder Method.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=100, n_components=1)\n >>> store.__sizeof__() # Pre-allocated tensor of 2x100\n 800\n\n Returns:\n The size of the underlying tensor in bytes.\n \"\"\"\n return self._data.element_size() * self._data.nelement()\n\n def __getitem__(\n self, index: tuple[int, ...] 
| slice | int\n ) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Get Item Dunder Method.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n index: The index of the tensor to fetch.\n\n Returns:\n The activation store item at the given index.\n \"\"\"\n return self._data[index]\n\n def shuffle(self) -> None:\n \"\"\"Shuffle the Data In-Place.\n\n This is much faster than using the shuffle argument on `torch.utils.data.DataLoader`.\n\n Example:\n >>> import torch\n >>> _seed = torch.manual_seed(42)\n >>> store = TensorActivationStore(max_items=10, n_neurons=1, n_components=1)\n >>> store.append(torch.tensor([0.]), component_idx=0)\n >>> store.append(torch.tensor([1.]), component_idx=0)\n >>> store.append(torch.tensor([2.]), component_idx=0)\n >>> store.shuffle()\n >>> [store[i, 0].item() for i in range(3)]\n [0.0, 2.0, 1.0]\n \"\"\"\n # Generate a permutation of the indices for the active data\n perm = torch.randperm(len(self))\n\n # Use this permutation to shuffle the active data in-place\n self._data[: len(self)] = self._data[perm]\n\n def append(self, item: Float[Tensor, Axis.INPUT_OUTPUT_FEATURE], component_idx: int) -> None:\n \"\"\"Add a single item to the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n item: The item to append to the dataset.\n component_idx: The component index to append the item to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n if self._items_stored[component_idx] + 1 > self._max_items:\n raise StoreFullError\n\n self._data[self._items_stored[component_idx], component_idx] = item.to(\n self._data.device,\n )\n self._items_stored[component_idx] += 1\n\n def extend(\n self,\n batch: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)],\n component_idx: int,\n ) -> None:\n \"\"\"Add a batch to the store.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n\n Args:\n batch: The batch to append to the dataset.\n component_idx: The component index to append the batch to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n n_activation_tensors: int = batch.shape[0]\n if self._items_stored[component_idx] + n_activation_tensors > self._max_items:\n raise StoreFullError\n\n self._data[\n self._items_stored[component_idx] : self._items_stored[component_idx]\n + n_activation_tensors,\n component_idx,\n ] = batch.to(self._data.device)\n self._items_stored[component_idx] += n_activation_tensors\n\n def empty(self) -> None:\n \"\"\"Empty the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n >>> store.empty()\n >>> len(store)\n 0\n \"\"\"\n # We don't need to zero the data, just reset the number of items stored\n self._items_stored = [0 for _ in self._items_stored]" }, { "identifier": "SparseAutoencoder", "path": "sparse_autoencoder/autoencoder/model.py", 
"snippet": "class SparseAutoencoder(Module):\n \"\"\"Sparse Autoencoder Model.\"\"\"\n\n config: SparseAutoencoderConfig\n \"\"\"Model config.\"\"\"\n\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Estimated Geometric Median of the Dataset.\n\n Used for initialising :attr:`tied_bias`.\n \"\"\"\n\n tied_bias: Float[\n Parameter, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Tied Bias Parameter.\n\n The same bias is used pre-encoder and post-decoder.\n \"\"\"\n\n pre_encoder_bias: TiedBias\n \"\"\"Pre-Encoder Bias.\"\"\"\n\n encoder: LinearEncoder\n \"\"\"Encoder.\"\"\"\n\n decoder: UnitNormDecoder\n \"\"\"Decoder.\"\"\"\n\n post_decoder_bias: TiedBias\n \"\"\"Post-Decoder Bias.\"\"\"\n\n def __init__(\n self,\n config: SparseAutoencoderConfig,\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n | None = None,\n ) -> None:\n \"\"\"Initialize the Sparse Autoencoder Model.\n\n Args:\n config: Model config.\n geometric_median_dataset: Estimated geometric median of the dataset.\n \"\"\"\n super().__init__()\n\n self.config = config\n\n # Store the geometric median of the dataset (so that we can reset parameters). This is not a\n # parameter itself (the tied bias parameter is used for that), so gradients are disabled.\n tied_bias_shape = shape_with_optional_dimensions(\n config.n_components, config.n_input_features\n )\n if geometric_median_dataset is not None:\n self.geometric_median_dataset = geometric_median_dataset.clone()\n self.geometric_median_dataset.requires_grad = False\n else:\n self.geometric_median_dataset = torch.zeros(tied_bias_shape)\n self.geometric_median_dataset.requires_grad = False\n\n # Initialize the tied bias\n self.tied_bias = Parameter(torch.empty(tied_bias_shape))\n self.initialize_tied_parameters()\n\n # Initialize the components\n self.pre_encoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.PRE_ENCODER)\n\n self.encoder = LinearEncoder(\n input_features=config.n_input_features,\n learnt_features=config.n_learned_features,\n n_components=config.n_components,\n )\n\n self.decoder = UnitNormDecoder(\n learnt_features=config.n_learned_features,\n decoded_features=config.n_input_features,\n n_components=config.n_components,\n )\n\n self.post_decoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.POST_DECODER)\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> ForwardPassResult:\n \"\"\"Forward Pass.\n\n Args:\n x: Input activations (e.g. 
activations from an MLP layer in a transformer model).\n\n        Returns:\n            Tuple of learned activations and decoded activations.\n        \"\"\"\n        x = self.pre_encoder_bias(x)\n        learned_activations = self.encoder(x)\n        x = self.decoder(learned_activations)\n        decoded_activations = self.post_decoder_bias(x)\n\n        return ForwardPassResult(learned_activations, decoded_activations)\n\n    def initialize_tied_parameters(self) -> None:\n        \"\"\"Initialize the tied parameters.\"\"\"\n        # The tied bias is initialised as the geometric median of the dataset\n        self.tied_bias.data = self.geometric_median_dataset\n\n    def reset_parameters(self) -> None:\n        \"\"\"Reset the parameters.\"\"\"\n        self.initialize_tied_parameters()\n        for module in self.children():\n            if \"reset_parameters\" in dir(module):\n                module.reset_parameters()\n\n    @property\n    def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:\n        \"\"\"Reset optimizer parameter details.\n\n        Details of the parameters that should be reset in the optimizer, when resetting\n        dictionary vectors.\n\n        Returns:\n            List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to\n            reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.\n        \"\"\"\n        return (\n            self.encoder.reset_optimizer_parameter_details\n            + self.decoder.reset_optimizer_parameter_details\n        )\n\n    def post_backwards_hook(self) -> None:\n        \"\"\"Hook to be called after each learning step.\n\n        This can be used to e.g. constrain weights to unit norm.\n        \"\"\"\n        self.decoder.constrain_weights_unit_norm()\n\n    @staticmethod\n    @validate_call\n    def get_single_component_state_dict(\n        state: SparseAutoencoderState, component_idx: NonNegativeInt\n    ) -> dict[str, Tensor]:\n        \"\"\"Get the state dict for a single component.\n\n        Args:\n            state: Sparse Autoencoder state.\n            component_idx: Index of the component to get the state dict for.\n\n        Returns:\n            State dict for the component.\n\n        Raises:\n            ValueError: If the state dict doesn't contain a components dimension.\n        \"\"\"\n        # Check the state has a components dimension\n        if state.config.n_components is None:\n            error_message = (\n                \"Trying to load a single component from the state dict, but the state dict \"\n                \"doesn't contain a components dimension.\"\n            )\n            raise ValueError(error_message)\n\n        # Return the state dict for the component\n        return {key: value[component_idx] for key, value in state.state_dict.items()}\n\n    def save(self, file_path: Path) -> None:\n        \"\"\"Save the model config and state dict to a file.\n\n        Args:\n            file_path: Path to save the model to.\n        \"\"\"\n        file_path.parent.mkdir(parents=True, exist_ok=True)\n        state = SparseAutoencoderState(config=self.config, state_dict=self.state_dict())\n        torch.save(state, file_path)\n\n    @staticmethod\n    def load(\n        file_path: FILE_LIKE,\n        component_idx: PositiveInt | None = None,\n    ) -> \"SparseAutoencoder\":\n        \"\"\"Load the model from a file.\n\n        Args:\n            file_path: Path to load the model from.\n            component_idx: If loading a state dict from a model that has been trained on multiple\n                components (e.g. all MLP layers) you may want to load just one component. In this\n                case you can set `component_idx` to the index of the component to load. 
Note you\n                should not set this if you want to load a state dict from a model that has been\n                trained on a single component (or if you want to load all components).\n\n        Returns:\n            The loaded model.\n        \"\"\"\n        # Load the file\n        serialized_state = torch.load(file_path, map_location=torch.device(\"cpu\"))\n        state = SparseAutoencoderState.model_validate(serialized_state)\n\n        # Initialise the model\n        config = SparseAutoencoderConfig(\n            n_input_features=state.config.n_input_features,\n            n_learned_features=state.config.n_learned_features,\n            n_components=state.config.n_components if component_idx is None else None,\n        )\n        state_dict = (\n            SparseAutoencoder.get_single_component_state_dict(state, component_idx)\n            if component_idx is not None\n            else state.state_dict\n        )\n        model = SparseAutoencoder(config)\n        model.load_state_dict(state_dict)\n\n        return model\n\n    def save_to_wandb(\n        self,\n        artifact_name: str,\n        directory: DirectoryPath = DEFAULT_TMP_DIR,\n    ) -> str:\n        \"\"\"Save the model to wandb.\n\n        Args:\n            artifact_name: A human-readable name for this artifact, which is how you can identify\n                this artifact in the UI or reference it in use_artifact calls. Names can contain\n                letters, numbers, underscores, hyphens, and dots. The name must be unique across a\n                project. Example: \"sweep_name 1e9 activations\".\n            directory: Directory to save the model to.\n\n        Returns:\n            Name of the wandb artifact.\n\n        Raises:\n            ValueError: If wandb is not initialised.\n        \"\"\"\n        # Save the file\n        directory.mkdir(parents=True, exist_ok=True)\n        file_name = artifact_name + \".pt\"\n        file_path = directory / file_name\n        self.save(file_path)\n\n        # Upload to wandb\n        if wandb.run is None:\n            error_message = \"Trying to save the model to wandb, but wandb is not initialised.\"\n            raise ValueError(error_message)\n        artifact = wandb.Artifact(\n            artifact_name,\n            type=\"model\",\n            description=\"Sparse Autoencoder model state, created with `sparse_autoencoder`.\",\n        )\n        artifact.add_file(str(file_path), name=\"sae-model-state.pt\")\n        artifact.save()\n        wandb.log_artifact(artifact)\n        artifact.wait()\n\n        return artifact.source_qualified_name\n\n    @staticmethod\n    def load_from_wandb(\n        wandb_artifact_name: str,\n        component_idx: PositiveInt | None = None,\n    ) -> \"SparseAutoencoder\":\n        \"\"\"Load the model from wandb.\n\n        Args:\n            wandb_artifact_name: Name of the wandb artifact to load the model from (e.g.\n                \"username/project/artifact_name:version\").\n            component_idx: If loading a state dict from a model that has been trained on multiple\n                components (e.g. all MLP layers) you may want to load just one component. In this\n                case you can set `component_idx` to the index of the component to load. Note you\n                should not set this if you want to load a state dict from a model that has been\n                trained on a single component (or if you want to load all components).\n\n        Returns:\n            The loaded model.\n        \"\"\"\n        api = wandb.Api()\n        artifact = api.artifact(wandb_artifact_name, type=\"model\")\n        download_path = artifact.download()\n        return SparseAutoencoder.load(Path(download_path) / \"sae-model-state.pt\", component_idx)\n\n    def save_to_hugging_face(\n        self,\n        file_name: str,\n        repo_id: str,\n        directory: DirectoryPath = DEFAULT_TMP_DIR,\n        hf_access_token: str | None = None,\n    ) -> None:\n        \"\"\"Save the model to Hugging Face.\n\n        Args:\n            file_name: Name of the file (e.g. 
\"model-something.pt\").\n repo_id: ID of the repo to save the model to.\n directory: Directory to save the model to.\n hf_access_token: Hugging Face access token.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to Hugging Face\n api = HfApi(token=hf_access_token)\n api.upload_file(\n path_or_fileobj=file_path,\n path_in_repo=file_name,\n repo_id=repo_id,\n repo_type=\"model\",\n )\n\n @staticmethod\n def load_from_hugging_face(\n file_name: str,\n repo_id: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from Hugging Face.\n\n Args:\n file_name: File name of the .pt state file.\n repo_id: ID of the repo to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n local_file = hf_hub_download(\n repo_id=repo_id,\n repo_type=\"model\",\n filename=file_name,\n revision=\"main\",\n )\n\n return SparseAutoencoder.load(Path(local_file), component_idx)" }, { "identifier": "SparseAutoencoderConfig", "path": "sparse_autoencoder/autoencoder/model.py", "snippet": "class SparseAutoencoderConfig(BaseModel, frozen=True):\n \"\"\"SAE model config.\"\"\"\n\n n_input_features: PositiveInt\n \"\"\"Number of input features.\n\n E.g. `d_mlp` if training on MLP activations from TransformerLens).\n \"\"\"\n\n n_learned_features: PositiveInt\n \"\"\"Number of learned features.\n\n The initial paper experimented with 1 to 256 times the number of input features, and primarily\n used a multiple of 8.\"\"\"\n\n n_components: PositiveInt | None = None\n \"\"\"Number of source model components the SAE is trained on.\"\"\n\n This is useful if you want to train the SAE on several components of the source model at once.\n If `None`, the SAE is assumed to be trained on just one component (in this case the model won't\n contain a component axis in any of the parameters).\n \"\"\"" }, { "identifier": "L2ReconstructionLoss", "path": "sparse_autoencoder/loss/decoded_activations_l2.py", "snippet": "class L2ReconstructionLoss(AbstractLoss):\n \"\"\"L2 Reconstruction loss.\n\n L2 reconstruction loss is calculated as the sum squared error between each each input vector\n and it's corresponding decoded vector. 
The original paper found that models trained with some\n loss functions such as cross-entropy loss generally prefer to represent features\n polysemantically, whereas models trained with L2 may achieve the same loss for both\n polysemantic and monosemantic representations of true features.\n\n Example:\n >>> import torch\n >>> loss = L2ReconstructionLoss()\n >>> input_activations = torch.tensor([[5.0, 4], [3.0, 4]])\n >>> output_activations = torch.tensor([[1.0, 5], [1.0, 5]])\n >>> unused_activations = torch.zeros_like(input_activations)\n >>> # Outputs both loss and metrics to log\n >>> loss.forward(input_activations, unused_activations, output_activations)\n tensor([8.5000, 2.5000])\n \"\"\"\n\n _reduction: LossReductionType\n \"\"\"MSE reduction type.\"\"\"\n\n def __init__(self, reduction: LossReductionType = LossReductionType.MEAN) -> None:\n \"\"\"Initialise the L2 reconstruction loss.\n\n Args:\n reduction: MSE reduction type.\n \"\"\"\n super().__init__()\n self._reduction = reduction\n\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n return \"l2_reconstruction_loss\"\n\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[ # noqa: ARG002\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> (\n Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]\n | Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)]\n ):\n \"\"\"Calculate the L2 reconstruction loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Loss per batch item.\n \"\"\"\n square_error_loss = mse_loss(source_activations, decoded_activations, reduction=\"none\")\n\n match self._reduction:\n case LossReductionType.MEAN:\n return square_error_loss.mean(dim=-1)\n case LossReductionType.SUM:\n return square_error_loss.sum(dim=-1)\n case LossReductionType.NONE:\n return square_error_loss" }, { "identifier": "LearnedActivationsL1Loss", "path": "sparse_autoencoder/loss/learned_activations_l1.py", "snippet": "class LearnedActivationsL1Loss(AbstractLoss):\n \"\"\"Learned activations L1 (absolute error) loss.\n\n L1 loss penalty is the absolute sum of the learned activations. 
The L1 penalty is this\n    multiplied by the l1_coefficient (designed to encourage sparsity).\n\n    Example:\n        >>> import torch\n        >>> l1_loss = LearnedActivationsL1Loss(0.1)\n        >>> learned_activations = torch.tensor([[2.0, -3], [2.0, -3]])\n        >>> unused_activations = torch.zeros_like(learned_activations)\n        >>> # Returns loss and metrics to log\n        >>> l1_loss.forward(unused_activations, learned_activations, unused_activations)[0]\n        tensor(0.5000)\n    \"\"\"\n\n    l1_coefficient: float | Float[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL)]\n    \"\"\"L1 coefficient.\"\"\"\n\n    def log_name(self) -> str:\n        \"\"\"Log name.\n\n        Returns:\n            Name of the loss module for logging.\n        \"\"\"\n        return \"learned_activations_l1_loss_penalty\"\n\n    @validate_call(config={\"arbitrary_types_allowed\": True})\n    def __init__(\n        self, l1_coefficient: PositiveFloat | Float[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL)]\n    ) -> None:\n        \"\"\"Initialize the absolute error loss.\n\n        Args:\n            l1_coefficient: L1 coefficient. The original paper experimented with L1 coefficients of\n                [0.01, 0.008, 0.006, 0.004, 0.001]. They used 250 tokens per prompt, so as an\n                approximate guide if you use e.g. 2x this number of tokens you might consider using\n                0.5x the l1 coefficient.\n        \"\"\"\n        self.l1_coefficient = l1_coefficient\n        super().__init__()\n\n    def _l1_loss(\n        self,\n        source_activations: Float[  # noqa: ARG002\n            Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n        ],\n        learned_activations: Float[\n            Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n        ],\n        decoded_activations: Float[  # noqa: ARG002\n            Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n        ],\n    ) -> _L1LossAndPenalty:\n        \"\"\"Learned activations L1 (absolute error) loss.\n\n        Args:\n            source_activations: Source activations (input activations to the autoencoder from the\n                source model).\n            learned_activations: Learned activations (intermediate activations in the autoencoder).\n            decoded_activations: Decoded activations.\n\n        Returns:\n            Tuple of itemwise absolute loss, and itemwise absolute loss multiplied by the l1\n            coefficient.\n        \"\"\"\n        # Absolute loss is the summed absolute value of the learned activations (i.e. 
over the\n # learned feature axis).\n itemwise_absolute_loss: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = torch.abs(learned_activations).sum(dim=-1)\n\n itemwise_absolute_loss_penalty: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = itemwise_absolute_loss * self.l1_coefficient\n\n return _L1LossAndPenalty(\n itemwise_absolute_loss=itemwise_absolute_loss,\n itemwise_absolute_loss_penalty=itemwise_absolute_loss_penalty,\n )\n\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Learned activations L1 (absolute error) loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Loss per batch item.\n \"\"\"\n return self._l1_loss(\n source_activations, learned_activations, decoded_activations\n ).itemwise_absolute_loss_penalty\n\n # Override to add both the loss and the penalty to the log\n def scalar_loss_with_log(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n batch_reduction: LossReductionType = LossReductionType.MEAN,\n component_reduction: LossReductionType = LossReductionType.NONE,\n ) -> LossResultWithMetrics:\n \"\"\"Scalar L1 loss (reduced across the batch and component axis) with logging.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n batch_reduction: Batch reduction type. 
Typically you would choose LossReductionType.MEAN\n to make the loss independent of the batch size.\n component_reduction: Component reduction type.\n\n Returns:\n Tuple of the L1 absolute error batch scalar loss and a dict of the properties to log\n (loss before and after the l1 coefficient).\n\n Raises:\n ValueError: If batch_reduction is LossReductionType.NONE.\n \"\"\"\n itemwise_absolute_loss, itemwise_absolute_loss_penalty = self._l1_loss(\n source_activations, learned_activations, decoded_activations\n )\n\n match batch_reduction:\n case LossReductionType.MEAN:\n batch_scalar_loss = itemwise_absolute_loss.mean(0)\n batch_scalar_loss_penalty = itemwise_absolute_loss_penalty.mean(0)\n case LossReductionType.SUM:\n batch_scalar_loss = itemwise_absolute_loss.sum(0)\n batch_scalar_loss_penalty = itemwise_absolute_loss_penalty.sum(0)\n case LossReductionType.NONE:\n error_message = \"Batch reduction type NONE not supported.\"\n raise ValueError(error_message)\n\n # Create the log\n metrics: list[MetricResult] = [\n MetricResult(\n name=\"loss\",\n postfix=\"learned_activations_l1\",\n component_wise_values=batch_scalar_loss.unsqueeze(0)\n if batch_scalar_loss.ndim == 0\n else batch_scalar_loss,\n location=MetricLocation.TRAIN,\n ),\n MetricResult(\n name=\"loss\",\n postfix=self.log_name(),\n component_wise_values=batch_scalar_loss_penalty.unsqueeze(0)\n if batch_scalar_loss_penalty.ndim == 0\n else batch_scalar_loss_penalty,\n location=MetricLocation.TRAIN,\n ),\n ]\n\n match component_reduction:\n case LossReductionType.MEAN:\n batch_scalar_loss_penalty = batch_scalar_loss_penalty.mean(0)\n case LossReductionType.SUM:\n batch_scalar_loss_penalty = batch_scalar_loss_penalty.sum(0)\n case LossReductionType.NONE:\n pass\n\n return LossResultWithMetrics(loss=batch_scalar_loss_penalty, loss_metrics=metrics)\n\n def extra_repr(self) -> str:\n \"\"\"Extra representation string.\"\"\"\n return f\"l1_coefficient={self.l1_coefficient}\"" }, { "identifier": "LossReducer", "path": "sparse_autoencoder/loss/reducer.py", "snippet": "class LossReducer(AbstractLoss):\n \"\"\"Loss reducer.\n\n Reduces multiple loss algorithms into a single loss algorithm (by summing). Analogous to\n nn.Sequential.\n\n Example:\n >>> from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss\n >>> from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss\n >>> LossReducer(\n ... L2ReconstructionLoss(),\n ... LearnedActivationsL1Loss(0.001),\n ... 
)\n LossReducer(\n (0): L2ReconstructionLoss()\n (1): LearnedActivationsL1Loss(l1_coefficient=0.001)\n )\n\n \"\"\"\n\n _modules: dict[str, \"AbstractLoss\"]\n \"\"\"Children loss modules.\"\"\"\n\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n return \"total_loss\"\n\n def __init__(\n self,\n *loss_modules: AbstractLoss,\n ):\n \"\"\"Initialize the loss reducer.\n\n Args:\n *loss_modules: Loss modules to reduce.\n\n Raises:\n ValueError: If the loss reducer has no loss modules.\n \"\"\"\n super().__init__()\n\n for idx, loss_module in enumerate(loss_modules):\n self._modules[str(idx)] = loss_module\n\n if len(self) == 0:\n error_message = \"Loss reducer must have at least one loss module.\"\n raise ValueError(error_message)\n\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Reduce loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Mean loss across the batch, summed across the loss modules.\n \"\"\"\n all_modules_loss: Float[Tensor, \"module train_batch\"] = torch.stack(\n [\n loss_module.forward(source_activations, learned_activations, decoded_activations)\n for loss_module in self._modules.values()\n ]\n )\n\n return all_modules_loss.sum(dim=0)\n\n def __dir__(self) -> list[str]:\n \"\"\"Dir dunder method.\"\"\"\n return list(self._modules.__dir__())\n\n def __getitem__(self, idx: int) -> AbstractLoss:\n \"\"\"Get item dunder method.\"\"\"\n return self._modules[str(idx)]\n\n def __iter__(self) -> Iterator[AbstractLoss]:\n \"\"\"Iterator dunder method.\"\"\"\n return iter(self._modules.values())\n\n def __len__(self) -> int:\n \"\"\"Length dunder method.\"\"\"\n return len(self._modules)" }, { "identifier": "Axis", "path": "sparse_autoencoder/tensor_types.py", "snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n 
ITEMS = auto()\n    \"\"\"Arbitrary number of items.\"\"\"\n\n    # Features\n    INPUT_OUTPUT_FEATURE = auto()\n    \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n    LEARNT_FEATURE = auto()\n    \"\"\"Learnt feature (e.g. feature in learnt activation vector).\"\"\"\n\n    DEAD_FEATURE = auto()\n    \"\"\"Dead feature.\"\"\"\n\n    ALIVE_FEATURE = auto()\n    \"\"\"Alive feature.\"\"\"\n\n    # Feature indices\n    INPUT_OUTPUT_FEATURE_IDX = auto()\n    \"\"\"Input or output feature index.\"\"\"\n\n    LEARNT_FEATURE_IDX = auto()\n    \"\"\"Learnt feature index.\"\"\"\n\n    # Other\n    POSITION = auto()\n    \"\"\"Token position.\"\"\"\n\n    SINGLE_ITEM = \"\"\n    \"\"\"Single item axis.\"\"\"\n\n    ANY = \"...\"\n    \"\"\"Any number of axis.\"\"\"\n\n    @staticmethod\n    def names(*axis: \"Axis\") -> str:\n        \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n        Example:\n            >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n            batch input_output_feature\n\n        Args:\n            *axis: Axis to join.\n\n        Returns:\n            Joined axis string.\n        \"\"\"\n        return \" \".join(a.value for a in axis)" } ]
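Taken together, the context snippets above are enough to run one end-to-end step: fill a store, encode and decode a batch, and score it with the combined loss. The following is a minimal illustrative sketch (not from the source file; all dimensions and the 0.001 L1 coefficient are arbitrary choices), using only the constructors and methods shown in the snippets:

from torch.utils.data import DataLoader

from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore
from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig
from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss
from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss
from sparse_autoencoder.loss.reducer import LossReducer

# Store of shape (item, component, feature), pre-filled with random test data
store = TensorActivationStore(max_items=64, n_neurons=16, n_components=2)
store.fill_with_test_data(n_batches=1, batch_size=64, n_components=2, input_features=16)
store.shuffle()  # In-place shuffle; faster than DataLoader(shuffle=True)

sae = SparseAutoencoder(
    SparseAutoencoderConfig(n_input_features=16, n_learned_features=128, n_components=2)
)
# Total loss = L2 reconstruction loss + L1 sparsity penalty on learned activations
loss_fn = LossReducer(L2ReconstructionLoss(), LearnedActivationsL1Loss(0.001))

for batch in DataLoader(store, batch_size=8):
    learned_activations, decoded_activations = sae.forward(batch)
    loss = loss_fn.forward(batch, learned_activations, decoded_activations)
    print(loss.shape)  # torch.Size([8, 2]): one value per batch item and component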
from jaxtyping import Float, Int64
from torch import Tensor
from torch.nn import Parameter
from sparse_autoencoder.activation_resampler.activation_resampler import ActivationResampler
from sparse_autoencoder.activation_store.base_store import ActivationStore
from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore
from sparse_autoencoder.autoencoder.model import SparseAutoencoder, SparseAutoencoderConfig
from sparse_autoencoder.loss.decoded_activations_l2 import L2ReconstructionLoss
from sparse_autoencoder.loss.learned_activations_l1 import LearnedActivationsL1Loss
from sparse_autoencoder.loss.reducer import LossReducer
from sparse_autoencoder.tensor_types import Axis
import pytest
import torch
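The ActivationResampler from this import block is driven once per training batch via step_resampler (see the first context snippet). Below is a hedged sketch of that call pattern, wrapped as a helper because the resampler's constructor is not shown in the snippet; the resampler, store, model, loss, and activity-count objects are all assumed to be built elsewhere:

def maybe_resample(resampler, neuron_fired_count, store, sae, loss_fn, train_batch_size=8):
    """Call once per training batch; returns parameter updates only when a resample is due."""
    parameter_updates = resampler.step_resampler(
        batch_neuron_activity=neuron_fired_count,  # Int64 (component, learnt_feature)
        activation_store=store,
        autoencoder=sae,
        loss_fn=loss_fn,
        train_batch_size=train_batch_size,
    )
    if parameter_updates is not None:  # None until neuron_activity_window_end is reached
        for component_idx, update in enumerate(parameter_updates):
            # Each ParameterUpdateResults carries dead neuron indices plus the
            # encoder weight/bias and decoder weight updates for that component
            print(component_idx, update.dead_neuron_indices)
    return parameter_updates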
16,481
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder(
"""Tests for the resample_neurons module.""" DEFAULT_N_ACTIVATIONS_STORE: int = 100 DEFAULT_N_INPUT_FEATURES: int = 3 DEFAULT_N_LEARNED_FEATURES: int = 5 DEFAULT_N_COMPONENTS: int = 2 @pytest.fixture() def full_activation_store() -> ActivationStore: """Create a dummy activation store, pre-populated with data.""" store = TensorActivationStore( max_items=DEFAULT_N_ACTIVATIONS_STORE, n_components=DEFAULT_N_COMPONENTS, n_neurons=DEFAULT_N_INPUT_FEATURES, ) store.fill_with_test_data( batch_size=DEFAULT_N_ACTIVATIONS_STORE, input_features=DEFAULT_N_INPUT_FEATURES, n_batches=1, n_components=DEFAULT_N_COMPONENTS, ) return store @pytest.fixture() def autoencoder_model() -> SparseAutoencoder: """Create a dummy autoencoder model.""" return SparseAutoencoder(
SparseAutoencoderConfig(
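This next_line opens the SparseAutoencoderConfig call, consistent with gold_snippet_index 4 below (the fifth entry in the context list). A plausible completion of the autoencoder_model fixture, reusing the constants from cropped_code; the argument values past the opening parenthesis are an assumption, not taken from the source file:

@pytest.fixture()
def autoencoder_model() -> SparseAutoencoder:
    """Create a dummy autoencoder model."""
    return SparseAutoencoder(
        SparseAutoencoderConfig(
            n_input_features=DEFAULT_N_INPUT_FEATURES,  # assumed continuation
            n_learned_features=DEFAULT_N_LEARNED_FEATURES,
            n_components=DEFAULT_N_COMPONENTS,
        )
    )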
4
2023-10-27 07:37:15+00:00
24k
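As a standalone sanity check on the L2ReconstructionLoss doctest in the context above, its per-item values follow from plain tensor arithmetic:

import torch

source = torch.tensor([[5.0, 4.0], [3.0, 4.0]])   # input activations
decoded = torch.tensor([[1.0, 5.0], [1.0, 5.0]])  # decoded activations

squared_error = (source - decoded) ** 2  # [[16., 1.], [4., 1.]]
per_item_loss = squared_error.mean(dim=-1)  # default MEAN reduction over features
print(per_item_loss)  # tensor([8.5000, 2.5000]), matching the doctest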
OATML-Markslab/ProteinNPT
scripts/train.py
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names_input = self.args.target_config.keys()\n self.target_names = [x for x in self.args.target_config.keys() if self.args.target_config[x][\"in_NPT_loss\"]]\n self.num_targets_input = len(self.target_names_input) #Includes all targets, incl. zero-shot fitness predictions\n self.num_targets = len(self.target_names) #Number of actual targets we want to predict\n self.MSA_sample_sequences = None\n self.training_sample_sequences_indices = None\n self.device = None\n self.optimizer = None\n self.model_type = args.model_type\n self.PNPT_ensemble_test_num_seeds = -1\n self.PNPT_no_reconstruction_error = False\n self.deactivate_col_attention = False\n self.tranception_attention = False\n \n assert self.args.embed_dim % self.args.attention_heads ==0, \"Embedding size {} needs to be a multiple of number of heads {}\".format(self.args.embed_dim, self.args.attention_heads)\n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n self.aa_embedding_dim = self.aa_embedding.embed_tokens.weight.shape[-1]\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = self.alphabet\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n )\n self.aa_positions_embedding = LearnedPositionalEmbedding(\n self.args.max_positions,\n self.args.embed_dim,\n self.padding_idx,\n )\n self.aa_embedding_dim = self.args.embed_dim\n\n if self.aa_embedding_dim != self.args.embed_dim: #Need to project internally\n self.token_embedding_projection = nn.Linear(\n self.aa_embedding_dim,\n self.args.embed_dim\n )\n self.token_embedding_expansion = nn.Linear(\n self.args.embed_dim,\n self.aa_embedding_dim\n )\n\n self.target_embedding = nn.ModuleDict(\n { \n target_name:\n nn.Linear(\n self.args.target_config[target_name][\"dim\"] + 1, #Need to add one as we append the mask flag to each input target \n self.args.embed_dim\n )\n if self.args.target_config[target_name][\"type\"]==\"continuous\"\n else \n nn.Embedding(\n self.args.target_config[target_name][\"dim\"],\n self.args.embed_dim\n )\n for target_name in self.target_names_input\n }\n )\n \n self.dropout_module = nn.Dropout(self.args.dropout)\n\n self.layers = nn.ModuleList(\n [\n AxialTransformerLayer(\n self.args.embed_dim,\n self.args.ffn_embed_dim,\n self.args.attention_heads,\n self.args.dropout,\n self.args.attention_dropout,\n self.args.activation_dropout,\n getattr(self.args, \"max_tokens_per_msa\", 
self.args.max_tokens_per_msa),\n self.deactivate_col_attention,\n self.tranception_attention,\n self.num_targets_input,\n )\n for _ in range(self.args.num_protein_npt_layers)\n ]\n )\n self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)\n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n weight = self.aa_embedding.embed_tokens.weight\n elif self.args.aa_embeddings == \"Tranception\":\n weight = self.aa_embedding.lm_head.weight\n else:\n weight = self.aa_embedding.weight\n\n self.lm_head = RobertaLMHead(\n embed_dim=self.aa_embedding_dim,\n output_dim=self.alphabet_size,\n weight=weight\n )\n \n target_pred_input_dim = self.args.embed_dim\n\n if args.target_prediction_model==\"MLP\": \n self.layer_pre_head = nn.ModuleDict(\n {\n target_name:\n nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n ) \n for target_name in self.target_names\n }\n )\n \n if args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n \n if args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n \n if self.args.target_prediction_head == \"Target_embeddings_only\":\n target_pred_input_dim = target_pred_input_dim\n elif self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\":\n target_pred_input_dim = target_pred_input_dim * (1 + self.num_targets_input)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1e-4)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n \n def forward(self, tokens, targets=None, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[], need_head_weights=False):\n padding_mask = tokens.eq(self.padding_idx) \n if not padding_mask.any(): padding_mask = None\n \n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size() # N, B, L (seqs with labels, seqs in MSA, seq length)\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size() # N, L (seqs with labels, seq length)\n \n if sequence_embeddings is not None:\n 
x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # N, B, L, D\n x = x[:,0,:,:] # N, L, D. #In each MSA batch the first sequence is what we care about. The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n x = x + self.aa_positions_embedding(tokens.view(batch_size, seqlen)).view(x.size()) # Need position embedding in PNPT since we will apply axial attention\n else:\n print(\"AA embeddings not recognized\")\n sys.exit(0)\n \n if self.aa_embedding_dim != self.args.embed_dim: x = self.token_embedding_projection(x)\n \n if self.args.target_prediction_head != \"Target_embeddings_and_AA_embeddings_mean_pooled\": #We mix AA embeddings pre NPT\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n\n x = x.view(1, batch_size, seqlen, self.args.embed_dim) # 1, N, L, D\n \n #Dimensions for each target (there are self.num_targets of them):\n y = []\n for target_name in self.target_names_input:\n num_sequences_with_target, dim_targets = targets[target_name].shape # N, D_t #In most cases dim_targets = D_t = 2 (original dimension of continuous input + 1 dim for mask)\n y.append(self.target_embedding[target_name](targets[target_name]).view(num_sequences_with_target,1,self.args.embed_dim))\n y = torch.cat(y, dim=-2) #concatenate across second to last dimension # N, num_targets, D\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim), \"Error in y shape: {}\".format(y.shape)\n y = y.view(1, num_sequences_with_target, self.num_targets_input, self.args.embed_dim) # 1, N, num_targets, D\n \n #Concatenate AA tokens and targets\n x = torch.cat((x,y),dim=-2) # 1, N, (L+num_targets), D\n x = self.emb_layer_norm_before(x)\n x = self.dropout_module(x)\n\n if padding_mask is not None:\n padding_mask_with_targets = torch.zeros(num_MSAs_in_batch, num_sequences_in_alignments, seqlen + self.num_targets_input)\n padding_mask_with_targets[...,:seqlen] = padding_mask\n padding_mask = padding_mask_with_targets\n x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers: hidden_representations[0] = x\n if need_head_weights:\n row_attn_weights = []\n col_attn_weights = []\n\n # 1 x N x L x D -> N x L x 1 x D\n x = x.permute(1, 2, 0, 3)\n for layer_idx, layer in enumerate(self.layers):\n x = layer(\n x,\n self_attn_padding_mask=padding_mask,\n need_head_weights=need_head_weights,\n )\n if need_head_weights:\n x, col_attn, row_attn = x\n col_attn_weights.append(col_attn.permute(2, 0, 1, 3, 4).cpu())\n row_attn_weights.append(row_attn.permute(1, 0, 2, 3).cpu())\n if (layer_idx + 1) in repr_layers:\n hidden_representations[layer_idx + 1] = x.permute(2, 0, 1, 3)\n x = self.emb_layer_norm_after(x)\n x = x.permute(2, 0, 1, 3) # N x L x 1 x D -> 1 x N x L x D\n assert x.shape == (1, num_sequences_with_target, seqlen + 
self.num_targets_input, self.args.embed_dim), \"Error with axial transformer\"\n # last hidden representation should have layer norm applied\n if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] = x\n \n # Loss over NPT MLM objective\n if self.aa_embedding_dim != self.args.embed_dim:\n logits_protein_sequence = self.lm_head(self.token_embedding_expansion(x[...,:seqlen,:]))\n else:\n logits_protein_sequence = self.lm_head(x[...,:seqlen,:]) #Remove dependency on targets for final AA predictions. logits size: (1, N, L, Vocab)\n \n x = x.view(num_sequences_with_target, seqlen + self.num_targets_input, self.args.embed_dim)\n x, y = x[:,:seqlen,:], x[:,seqlen:,:] # (N,L,D) and (N,num_targets,D)\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim)\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n x = x.mean(dim=-2) # N, D\n y = y.view(num_sequences_with_target,self.num_targets_input * self.args.embed_dim)\n y = torch.cat((x,y),dim=-1) # N, (1+num_targets) * D\n \n target_predictions = {}\n for target_index, target_name in enumerate(self.target_names):\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n target_predictions[target_name] = self.target_pred_head[target_name](y).view(-1) #We use the concatenated X and target embeddings (all of them) to predict each target\n else:\n if self.args.target_prediction_model == \"MLP\": y[:,target_index,:] = self.layer_pre_head[target_name](y[:,target_index,:])\n target_predictions[target_name] = self.target_pred_head[target_name](y[:,target_index,:]).view(-1) #input the embedding with the relevant target_index\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n \n result = {\"logits_protein_sequence\": logits_protein_sequence, \"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n if need_head_weights:\n col_attentions = torch.stack(col_attn_weights, 1)\n row_attentions = torch.stack(row_attn_weights, 1)\n result[\"col_attentions\"] = col_attentions\n result[\"row_attentions\"] = row_attentions\n\n return result\n\n def forward_with_uncertainty(self, tokens, targets, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10, number_of_mutated_seqs_to_score=None):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output)\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad():\n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, targets, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n \n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def protein_npt_loss(self, token_predictions_logits, token_labels, target_predictions, target_labels, MLM_reconstruction_loss_weight, label_smoothing=0.0):\n target_prediction_loss_weight = 1.0 - MLM_reconstruction_loss_weight\n total_loss = 0.0\n if (token_labels is not None) and (MLM_reconstruction_loss_weight > 0.0):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None: token_labels = token_labels[:,0,:] #Only keep the token labels for seq to score. Drops the token labels for MSA sequences\n masked_lm_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(token_predictions_logits.reshape(-1, self.alphabet_size), token_labels.reshape(-1))\n reconstruction_loss = masked_lm_loss\n total_loss += MLM_reconstruction_loss_weight * reconstruction_loss\n else:\n reconstruction_loss = torch.tensor(0.0)\n target_prediction_loss = {}\n for target_name in self.target_names:\n if self.args.target_config[target_name][\"in_NPT_loss\"]:\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n loss_masked_targets = ~target_labels[target_name].eq(-100) #Masked items are the ones for which the label was not set to -100\n if loss_masked_targets.sum()==0 or torch.isnan(target_labels[target_name][loss_masked_targets]).sum() > 0: #First condition true if we dont mask anything (eg., all target missing at eval). 
Second condition true if we force-mask one value at train time (to satisfy min_num_labels_masked in mast_target()) and corresponding target value is missing\n tgt_loss = torch.tensor(0.0)\n else:\n tgt_loss = MSELoss(reduction=\"mean\")(target_predictions[target_name][loss_masked_targets], target_labels[target_name][loss_masked_targets]) #we do not average the loss per batch, so that it's easier to do 1 full average across all batches\n if torch.isnan(tgt_loss).sum() > 0:\n print(\"Detected nan loss\")\n print(target_predictions[target_name])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1)) # Note: we dont add one to the # of categories in the CE loss here (we dont predict <mask>)\n target_prediction_loss[target_name] = tgt_loss\n \n total_loss += target_prediction_loss_weight * target_prediction_loss[target_name]\n return total_loss, reconstruction_loss, target_prediction_loss\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n if self.optimizer is None:\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "AugmentedPropertyPredictor", "path": "baselines/model.py", "snippet": "class AugmentedPropertyPredictor(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n print(\"Alphabet: {}\".format(alphabet))\n print(\"Alphabet size: {}\".format(self.alphabet_size))\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names = self.args.target_config.keys() \n self.MSA_sample_sequences = None \n self.device = None\n self.model_type = args.model_type \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n if 
self.args.aa_embeddings == \"MSA_Transformer\": self.args.seq_len = self.args.MSA_seq_len #If MSA does not cover full sequence length, we adjust seq_len param to be MSA_len (sequences truncated as needed in preprocessing)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Sequential(\n nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n ),\n nn.ReLU()\n )\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n self.args.target_prediction_head == \"One_hot_encoding\"\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = get_tranception_tokenizer()\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n self.config = config\n else:\n print(\"Error: Specified AA embedding invalid\")\n sys.exit(0)\n\n if self.args.aa_embeddings != \"One_hot_encoding\": \n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n self.dropout_module = nn.Dropout(self.args.dropout)\n\n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\":\n target_pred_input_dim = self.args.embed_dim\n elif self.args.target_prediction_head == \"One_hot_encoding\":\n target_pred_input_dim = (self.args.seq_len + 1) * self.alphabet_size if args.target_prediction_model!=\"CNN\" else self.alphabet_size #Add one for the BOS token\n else:\n print(self.args.target_prediction_head)\n print(\"Error: Specified embedding aggregation invalid\")\n sys.exit(0)\n \n if args.target_prediction_model==\"MLP\":\n self.layer_pre_head = nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n elif args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads if self.args.attention_heads is not None else 4,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n elif args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n target_pred_input_dim = target_pred_input_dim if self.args.target_prediction_head != \"One_hot_encoding\" else target_pred_input_dim * (self.args.seq_len + 1)\n elif args.target_prediction_model==\"light_attention\":\n # Adapted from Stark et al (https://github.com/HannesStark/protein-localization)\n self.feature_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.attention_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(self.args.dropout)\n self.linear = nn.Sequential(\n nn.Linear(2 * self.args.embed_dim, 32),\n nn.Dropout(self.args.dropout),\n nn.ReLU(),\n nn.BatchNorm1d(32)\n )\n 
target_pred_input_dim = 32\n elif args.target_prediction_model==\"linear\":\n pass\n else:\n print(\"Error: Specified layer_pre_head invalid\")\n sys.exit(0)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1.0)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names #If multiple targets, we learn a separate linear head for each separately\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n\n def forward(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[]):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size()\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size()\n \n if sequence_embeddings is not None:\n x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # B, N, L, D\n x = x[:,0,:,:] #In each MSA batch the first sequence is what we care about. 
The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings == \"Tranception\":\n processed_batch = {'input_ids': tokens, 'labels': tokens}\n output = self.aa_embedding(**processed_batch, return_dict=True, output_hidden_states=True)\n x = output.hidden_states[0]\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n x = nn.functional.one_hot(tokens, num_classes=self.alphabet_size).view(batch_size,-1).float()\n if self.args.target_prediction_model == \"CNN\": x = x.view(batch_size,seqlen,self.alphabet_size)\n\n if self.args.aa_embeddings != \"One_hot_encoding\":\n x = self.emb_layer_norm_after(x)\n x = self.dropout_module(x)\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers:\n hidden_representations[0] = x\n\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n elif self.args.target_prediction_model==\"light_attention\":\n x = x.permute(0,2,1) #N, D, L\n o = self.feature_convolution(x) \n o = self.dropout(o)\n attention = self.attention_convolution(x)\n o1 = torch.sum(o * self.softmax(attention), dim=-1)\n o2, _ = torch.max(o, dim=-1)\n o = torch.cat([o1, o2], dim=-1)\n x = self.linear(o)\n \n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\": x = x.mean(dim=-2)\n \n if self.args.target_prediction_model == \"MLP\": x = self.layer_pre_head(x)\n \n target_predictions = {}\n for target_name in self.target_names:\n target_predictions[target_name] = self.target_pred_head[target_name](x).view(-1)\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n\n result = {\"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n return result\n \n def forward_with_uncertainty(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output).\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad(): \n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n\n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def prediction_loss(self, target_predictions, target_labels, label_smoothing=0.1):\n total_target_prediction_loss = 0.0\n target_prediction_loss_dict = {}\n for target_name in self.target_names:\n non_missing_target_indicator = ~torch.isnan(target_labels[target_name])\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n tgt_loss = MSELoss(reduction=\"sum\")(target_predictions[target_name][non_missing_target_indicator], target_labels[target_name][non_missing_target_indicator])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"none\",label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1))\n target_prediction_loss_dict[target_name] = tgt_loss\n total_target_prediction_loss += tgt_loss\n return total_target_prediction_loss, target_prediction_loss_dict\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "Alphabet", "path": "utils/esm/data.py", "snippet": "class Alphabet(object):\n def __init__(\n self,\n standard_toks: Sequence[str],\n prepend_toks: Sequence[str] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\"),\n append_toks: Sequence[str] = (\"<cls>\", \"<mask>\", \"<sep>\"),\n prepend_bos: bool = True,\n append_eos: bool = False,\n use_msa: bool = False,\n ):\n #ESM Alphabet: {'<cls>': 0, '<pad>': 1, '<eos>': 2, '<unk>': 3, 'L': 4, 'A': 5, 'G': 6, 'V': 7, 'S': 8, 'E': 9, 'R': 10, 'T': 11, 'I': 12, 'D': 13, 'P': 14, 'K': 15, 'Q': 16, 'N': 17, 'F': 18, 'Y': 19, 'M': 20, 'H': 21, 'W': 22, 'C': 23, 'X': 24, 'B': 25, 'U': 26, 'Z': 27, 'O': 28, '.': 29, '-': 30, '<null_1>': 31, '<mask>': 32}\n self.standard_toks = list(standard_toks)\n self.prepend_toks = list(prepend_toks)\n self.append_toks = list(append_toks)\n self.prepend_bos = prepend_bos\n self.append_eos = append_eos\n self.use_msa = use_msa\n\n self.all_toks = list(self.prepend_toks)\n self.all_toks.extend(self.standard_toks)\n for i in range((8 - (len(self.all_toks) % 8)) % 8):\n self.all_toks.append(f\"<null_{i + 1}>\")\n self.all_toks.extend(self.append_toks)\n\n self.tok_to_idx = {tok: i for i, tok in enumerate(self.all_toks)}\n\n self.unk_idx = self.tok_to_idx[\"<unk>\"]\n self.padding_idx = self.get_idx(\"<pad>\")\n self.cls_idx = self.get_idx(\"<cls>\")\n self.mask_idx = self.get_idx(\"<mask>\")\n self.eos_idx = self.get_idx(\"<eos>\")\n self.all_special_tokens = ['<eos>', '<unk>', '<pad>', '<cls>', '<mask>']\n self.unique_no_split_tokens = self.all_toks\n\n def __len__(self):\n return len(self.all_toks)\n\n def get_idx(self, tok):\n return self.tok_to_idx.get(tok, self.unk_idx)\n\n def get_tok(self, ind):\n return self.all_toks[ind]\n\n def to_dict(self):\n return self.tok_to_idx.copy()\n\n def get_batch_converter(self, truncation_seq_length: int = None):\n if self.use_msa:\n return MSABatchConverter(self, truncation_seq_length)\n else:\n return BatchConverter(self, truncation_seq_length)\n\n @classmethod\n def from_architecture(cls, name: str) -> \"Alphabet\":\n if name in (\"ESM-1\", \"protein_bert_base\"):\n standard_toks = 
proteinseq_toks[\"toks\"]\n prepend_toks: Tuple[str, ...] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks: Tuple[str, ...] = (\"<cls>\", \"<mask>\", \"<sep>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n elif name in (\"ESM-1b\", \"roberta_large\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = True\n use_msa = False\n elif name in (\"MSA Transformer\", \"msa_transformer\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n elif \"invariant_gvp\" in name.lower():\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\", \"<cath>\", \"<af2>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa)\n\n def _tokenize(self, text) -> str:\n return text.split()\n\n def tokenize(self, text, **kwargs) -> List[str]:\n \"\"\"\n Inspired by https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py\n Converts a string in a sequence of tokens, using the tokenizer.\n\n Args:\n text (:obj:`str`):\n The sequence to be encoded.\n\n Returns:\n :obj:`List[str]`: The list of tokens.\n \"\"\"\n\n def split_on_token(tok, text):\n result = []\n split_text = text.split(tok)\n for i, sub_text in enumerate(split_text):\n # AddedToken can control whitespace stripping around them.\n # We use them for GPT2 and Roberta to have different behavior depending on the special token\n # Cf. 
https://github.com/huggingface/transformers/pull/2778\n # and https://github.com/huggingface/transformers/issues/3788\n # We strip left and right by default\n if i < len(split_text) - 1:\n sub_text = sub_text.rstrip()\n if i > 0:\n sub_text = sub_text.lstrip()\n\n if i == 0 and not sub_text:\n result.append(tok)\n elif i == len(split_text) - 1:\n if sub_text:\n result.append(sub_text)\n else:\n pass\n else:\n if sub_text:\n result.append(sub_text)\n result.append(tok)\n return result\n\n def split_on_tokens(tok_list, text):\n if not text.strip():\n return []\n\n tokenized_text = []\n text_list = [text]\n for tok in tok_list:\n tokenized_text = []\n for sub_text in text_list:\n if sub_text not in self.unique_no_split_tokens:\n tokenized_text.extend(split_on_token(tok, sub_text))\n else:\n tokenized_text.append(sub_text)\n text_list = tokenized_text\n\n return list(\n itertools.chain.from_iterable(\n (\n self._tokenize(token)\n if token not in self.unique_no_split_tokens\n else [token]\n for token in tokenized_text\n )\n )\n )\n\n no_split_token = self.unique_no_split_tokens\n tokenized_text = split_on_tokens(no_split_token, text)\n return tokenized_text\n\n def encode(self, text):\n return [self.tok_to_idx[tok] for tok in self.tokenize(text)]" }, { "identifier": "get_tranception_tokenizer", "path": "utils/tranception/model_pytorch.py", "snippet": "def get_tranception_tokenizer():\n #Tranception Alphabet: \"vocab\":{\"[UNK]\":0,\"[CLS]\":1,\"[SEP]\":2,\"[PAD]\":3,\"[MASK]\":4,\"A\":5,\"C\":6,\"D\":7,\"E\":8,\"F\":9,\"G\":10,\"H\":11,\"I\":12,\"K\":13,\"L\":14,\"M\":15,\"N\":16,\"P\":17,\"Q\":18,\"R\":19,\"S\":20,\"T\":21,\"V\":22,\"W\":23,\"Y\":24}\n dir_path = os.path.dirname(os.path.abspath(__file__))\n tokenizer = PreTrainedTokenizerFast(tokenizer_file=dir_path + os.sep + \"utils/tokenizers/Basic_tokenizer\", unk_token=\"[UNK]\", sep_token=\"[SEP]\", pad_token=\"[PAD]\", cls_token=\"[CLS]\",mask_token=\"[MASK]\")\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n tokenizer.tok_to_idx = tokenizer.vocab\n tokenizer.padding_idx = tokenizer.tok_to_idx[\"[PAD]\"]\n tokenizer.mask_idx = tokenizer.tok_to_idx[\"[MASK]\"]\n tokenizer.cls_idx = tokenizer.tok_to_idx[\"[CLS]\"]\n tokenizer.eos_idx = tokenizer.tok_to_idx[\"[SEP]\"]\n tokenizer.prepend_bos = True\n tokenizer.append_eos = True\n return tokenizer" }, { "identifier": "get_train_val_test_data", "path": "utils/data_utils.py", "snippet": "def get_train_val_test_data(args, assay_file_names):\n target_names = args.target_config.keys() \n assay_data={}\n merge = None\n main_target_name = None\n main_target_name_count = 0\n for target in target_names:\n if args.target_config[target][\"main_target\"]: \n main_target_name=target\n main_target_name_count+=1\n assert main_target_name is not None, \"No main target referenced. Please update config to select a unique main target.\"\n assert main_target_name_count <= 1, \"Several main targets referenced. 
Please update config to select a unique main target.\"\n \n assay_data[main_target_name] = pd.read_csv(args.target_config[main_target_name][\"location\"] + os.sep + assay_file_names[main_target_name])[['mutant','mutated_sequence',args.target_config[main_target_name][\"var_name\"],args.fold_variable_name]] \n assay_data[main_target_name].columns = ['mutant','mutated_sequence', main_target_name, args.fold_variable_name]\n merge = assay_data[main_target_name]\n \n for target_name in target_names:\n if target_name!=main_target_name:\n print(target_name)\n print(args.target_config)\n print(assay_file_names)\n assay_data[target_name] = pd.read_csv(args.target_config[target_name][\"location\"] + os.sep + assay_file_names[target_name])[['mutant',args.target_config[target_name][\"var_name\"]]] \n assay_data[target_name].columns = ['mutant',target_name]\n merge = pd.merge(merge, assay_data[target_name], how='left', on='mutant')\n \n if args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = pd.read_csv(args.zero_shot_fitness_predictions_location + os.sep + assay_file_names[main_target_name])[['mutant',args.zero_shot_fitness_predictions_var_name]]\n zero_shot_fitness_predictions.columns = ['mutant','zero_shot_fitness_predictions']\n zero_shot_fitness_predictions['zero_shot_fitness_predictions'] = standardize(zero_shot_fitness_predictions['zero_shot_fitness_predictions'])\n merge = pd.merge(merge,zero_shot_fitness_predictions,how='inner',on='mutant')\n\n train_val_test_splits = split_data_based_on_test_fold_index(\n dataframe = merge, \n fold_variable_name = args.fold_variable_name,\n test_fold_index = args.test_fold_index,\n use_validation_set = args.use_validation_set\n )\n splits_dict = {}\n for split_name, split in zip(['train','val','test'], train_val_test_splits):\n if split_name=='val' and not args.use_validation_set: continue\n splits_dict[split_name] = {}\n splits_dict[split_name]['mutant_mutated_seq_pairs'] = list(zip(list(split['mutant']),list(split['mutated_sequence'])))\n raw_targets = {target_name: split[target_name] for target_name in target_names}\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": raw_targets['zero_shot_fitness_predictions'] = split['zero_shot_fitness_predictions']\n if split_name==\"train\":\n raw_targets, target_processing = preprocess_training_targets(raw_targets, args.target_config)\n else:\n raw_targets = preprocess_test_targets(raw_targets, args.target_config, target_processing)\n for target_name in target_names: \n splits_dict[split_name][target_name] = raw_targets[target_name]\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": splits_dict[split_name]['zero_shot_fitness_predictions'] = raw_targets['zero_shot_fitness_predictions']\n # load dict into dataset objects\n train_data = Dataset.from_dict(splits_dict['train'])\n val_data = Dataset.from_dict(splits_dict['val']) if args.use_validation_set else None\n test_data = Dataset.from_dict(splits_dict['test'])\n return train_data, val_data, test_data, target_processing" }, { "identifier": "standardize", "path": "utils/data_utils.py", "snippet": "def standardize(x):\n return (x - x.mean()) / x.std()" }, { "identifier": "pnpt_count_non_nan", "path": "utils/data_utils.py", "snippet": "def pnpt_count_non_nan(x):\n missing_mask = np.isnan(x) | np.equal(x,-100)\n return np.count_nonzero(~missing_mask)" }, { "identifier": "pnpt_spearmanr", "path": "utils/data_utils.py", "snippet": "def pnpt_spearmanr(prediction,target):\n mask_missing_values = 
np.isnan(target) | np.equal(target, -100) #In PNPT missing values are never masked so corresponding labels are always set to -100\n return spearmanr(prediction[~mask_missing_values], target[~mask_missing_values])[0] #first value is spearman rho, second is the corresponding p-value " }, { "identifier": "process_MSA", "path": "utils/msa_utils.py", "snippet": "def process_MSA(args, MSA_filename, MSA_weights_filename):\n filtered_MSA_filename = filter_msa(filename = args.MSA_data_folder + os.sep + MSA_filename, path_to_hhfilter = args.path_to_hhfilter)\n MSA_all_sequences, MSA_non_ref_sequences_weights = compute_sequence_weights(MSA_filename = filtered_MSA_filename, MSA_weights_filename = args.MSA_weight_data_folder + os.sep + MSA_weights_filename)\n return MSA_all_sequences, MSA_non_ref_sequences_weights" }, { "identifier": "Trainer", "path": "utils/model_utils.py", "snippet": "class Trainer():\n def __init__(self, \n model,\n args,\n train_data, \n val_data,\n MSA_sequences, \n MSA_weights,\n MSA_start_position,\n MSA_end_position,\n target_processing,\n distributed_training=False\n ):\n self.model = model\n self.args = args\n self.train_data = train_data\n self.val_data = val_data\n self.MSA_sequences = MSA_sequences\n self.MSA_weights = MSA_weights\n self.MSA_start_position = MSA_start_position\n self.MSA_end_position = MSA_end_position\n self.target_processing = target_processing\n self.distributed_training = distributed_training\n \n def train(self):\n \"\"\"\n Returns the last value of training_step (useful in case of early stopping for isntance)\n \"\"\"\n \n self.model.train()\n self.model.cuda()\n self.model.set_device()\n\n if self.distributed_training:\n self.model = torch.nn.parallel.DistributedDataParallel(self.model)\n train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_data)\n else:\n train_sampler = None\n \n #To ensure reproducibility with seed setting\n def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n g = torch.Generator()\n g.manual_seed(0)\n train_loader = torch.utils.data.DataLoader(\n dataset=self.train_data, \n batch_size=self.args.training_num_assay_sequences_per_batch_per_gpu, \n shuffle=(train_sampler is None),\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True, \n sampler=train_sampler,\n collate_fn=collate_fn_protein_npt,\n worker_init_fn=seed_worker,\n generator=g,\n )\n optimizer = self.model.create_optimizer()\n scheduler = learning_rate_scheduler(\n num_warmup_steps=self.args.num_warmup_steps, \n num_total_training_steps=self.args.num_total_training_steps, \n max_learning_rate=self.args.max_learning_rate, \n min_learning_rate=self.args.min_learning_rate\n )\n \n train_iterator = iter(train_loader)\n num_epochs = 0\n prior_log_time = time.time()\n total_train_time = 0\n log_train_total_loss = 0\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n all_spearmans_eval_during_training = []\n max_average_spearman_across_targets = - math.inf\n if self.args.training_fp16: scaler = torch.cuda.amp.GradScaler()\n\n for training_step in tqdm.tqdm(range(1, self.args.num_total_training_steps+1)):\n optimizer.zero_grad(set_to_none=True)\n lr = scheduler(training_step)\n update_lr_optimizer(optimizer, lr)\n reconstruction_loss_coeff = 
get_reconstruction_loss_coefficient(training_step, num_total_training_steps=self.args.num_total_training_steps) if (self.model.model_type==\"ProteinNPT\" and not self.model.PNPT_no_reconstruction_error) else 0\n for gradient_accum_step in range(self.args.gradient_accumulation):\n try:\n batch = next(train_iterator)\n except:\n num_epochs +=1\n train_iterator = iter(train_loader)\n batch = next(train_iterator)\n \n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = None,\n proba_target_mask = 0.15,\n proba_aa_mask = 0.15,\n eval_mode = False,\n device=self.model.device,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=False,\n indel_mode=self.args.indel_mode\n )\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n scaler.scale(total_loss).backward()\n else:\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n 
MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n if total_loss.item() > 10.0 and training_step >= 100:\n print(\"High training loss detected: {}\".format(total_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.grad_norm_clip)\n # Taking optimizer update out of the inner loop to support gradient accumulation\n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n\n log_train_total_loss += total_loss\n for target_name in self.model.target_names:\n log_train_target_prediction_loss_dict[target_name] += target_prediction_loss_dict[target_name]\n if self.model.model_type==\"ProteinNPT\": \n log_train_reconstruction_loss += reconstruction_loss\n log_train_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum()\n for target_name in self.model.target_names:\n log_train_num_target_masked_tokens_dict[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item() # Masked targets are encoded by 1.0. Mask column is the very last one\n else:\n log_num_sequences_predicted += len(batch['mutant_mutated_seq_pairs'])\n \n if training_step % self.args.num_logging_training_steps == 0 and self.args.use_wandb:\n time_end_step = time.time()\n delta_time_since_last_log = time_end_step - prior_log_time\n total_train_time += delta_time_since_last_log\n prior_log_time = time_end_step\n train_logs = {\n \"training_step\": training_step, \n \"step_time\": delta_time_since_last_log / (self.args.num_logging_training_steps)\n }\n if self.model.model_type==\"ProteinNPT\": \n train_logs[\"train_total_loss_per_step\"]: log_train_total_loss / self.args.num_logging_training_steps\n train_logs[\"train_reconstruction_loss_per_masked_token\"] = log_train_reconstruction_loss / log_train_num_masked_tokens\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_masked_token\"] = log_train_target_prediction_loss_dict[target_name] / log_train_num_target_masked_tokens_dict[target_name]\n else:\n train_logs[\"train_total_loss_per_seq\"]: log_train_total_loss / log_num_sequences_predicted\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_seq\"] = log_train_target_prediction_loss_dict[target_name] / log_num_sequences_predicted\n wandb.log(train_logs)\n log_train_total_loss = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0 \n \n if self.args.save_model_checkpoint and (training_step % self.args.num_saving_training_steps) == 0:\n if not os.path.exists(self.args.model_location): os.mkdir(self.args.model_location)\n if not os.path.exists(self.args.model_location + os.sep + 'checkpoint-'+str(training_step)): 
os.mkdir(self.args.model_location + os.sep + 'checkpoint-'+str(training_step))\n torch.save({\n 'training_step': training_step,\n 'args': self.args,\n 'state_dict': self.model.state_dict(),\n 'optimizer' : optimizer.state_dict()\n }, \n self.args.model_location + os.sep + 'checkpoint-'+str(training_step) + os.sep + 'checkpoint.t7'\n )\n \n if training_step % self.args.num_eval_steps == 0 and self.args.use_validation_set:\n if self.model.model_type==\"ProteinNPT\":\n eval_results = self.eval(\n test_data=self.val_data,\n train_data=self.train_data,\n reconstruction_loss_weight=0.0,\n output_all_predictions=True\n )\n else:\n eval_results = self.eval(\n test_data=self.val_data, \n output_all_predictions=True\n )\n eval_logs = {\"Training step\": training_step} \n if self.model.model_type==\"ProteinNPT\":\n normalization = 0\n for target_name in self.model.target_names: normalization += eval_results['eval_num_masked_targets'][target_name]\n else:\n normalization = eval_results['eval_num_predicted_targets']\n eval_logs['Eval total loss per seq.']: eval_results['eval_total_loss'] / normalization\n average_spearman_across_targets = 0 #If early stopping based on validation spearman and multiple targets, we check that avg spearman is not decreasing for a certain # of times in a row\n for target_name in self.model.target_names:\n if self.model.model_type==\"ProteinNPT\": normalization = eval_results['eval_num_masked_targets'][target_name] #Update for PNPT (keeep the same normalization constant otherwise)\n eval_logs['Eval loss '+str(target_name)+' per seq.'] = eval_results['eval_target_prediction_loss_dict'][target_name] / normalization\n eval_logs['Eval spearman '+target_name] = spearmanr(eval_results['output_scores']['predictions_'+target_name], eval_results['output_scores']['labels_'+target_name])[0]\n average_spearman_across_targets += eval_logs['Eval spearman '+target_name]\n average_spearman_across_targets /= len(self.model.target_names)\n print(\" | \".join([key + \": \"+str(round(eval_logs[key],5)) for key in eval_logs.keys()]))\n if self.args.use_wandb: wandb.log(eval_logs)\n # Early stopping\n all_spearmans_eval_during_training.append(average_spearman_across_targets)\n if average_spearman_across_targets > max_average_spearman_across_targets: max_average_spearman_across_targets = average_spearman_across_targets\n if (training_step >= 1000) and (self.args.early_stopping_patience is not None) and (np.array(all_spearmans_eval_during_training)[-self.args.early_stopping_patience:].max() < max_average_spearman_across_targets):\n print(\"Early stopping. Training step: {}. Total eval loss: {}. 
Avg spearman: {}\".format(training_step, eval_results['eval_total_loss'], average_spearman_across_targets))\n break\n self.model.train() #Move back the model to train mode after eval loop\n trainer_final_status = {\n 'total_training_steps': training_step,\n 'total_train_time': total_train_time,\n 'total_training_epochs': num_epochs\n }\n return trainer_final_status\n\n def eval(self, test_data, output_all_predictions=False, need_head_weights=False, train_data = None, reconstruction_loss_weight=0.5, selected_indices_seed=0):\n \"\"\"\n total_eval_target_prediction_loss is the sum of all target prediction losses across all targets\n total_eval_target_prediction_loss contains the breakdown by target\n num_predicted_targets has the number of predicted items\n output_scores is a dict with sequences, predictions and labels\n \"\"\"\n self.model.eval()\n self.model.cuda()\n self.model.set_device()\n with torch.no_grad():\n eval_loader = torch.utils.data.DataLoader(\n dataset=test_data, \n batch_size=self.args.eval_num_sequences_to_score_per_batch_per_gpu, \n shuffle=False,\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True,\n collate_fn=collate_fn_protein_npt\n )\n eval_iterator = iter(eval_loader)\n \n eval_total_loss = 0\n if self.model.model_type==\"ProteinNPT\": \n eval_reconstruction_loss = 0\n eval_num_masked_tokens = 0\n eval_num_masked_targets = defaultdict(int)\n else:\n num_predicted_targets = 0\n eval_target_prediction_loss_dict = defaultdict(int)\n output_scores = defaultdict(list) if output_all_predictions else None\n\n if need_head_weights:\n col_attentions=[]\n row_attentions=[]\n\n for batch in tqdm.tqdm(eval_iterator):\n if output_all_predictions: \n output_scores['mutated_sequence'] += list(zip(*batch['mutant_mutated_seq_pairs']))[1]\n output_scores['mutant'] += list(zip(*batch['mutant_mutated_seq_pairs']))[0]\n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = train_data,\n proba_target_mask = 1.0, \n proba_aa_mask = 0.0,\n eval_mode = True,\n device=self.model.device,\n selected_indices_seed=selected_indices_seed,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=True,\n indel_mode=self.args.indel_mode\n )\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings'],\n need_head_weights=need_head_weights\n )\n 
batch_loss, batch_reconstruction_loss, batch_target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_weight, \n label_smoothing=self.args.label_smoothing\n )\n if batch_loss.item() > 10.0:\n print(\"High eval loss detected: {}\".format(batch_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n batch_loss, batch_target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n \n eval_total_loss += batch_loss.item()\n for target_name in self.model.target_names:\n eval_target_prediction_loss_dict[target_name] += batch_target_prediction_loss_dict[target_name].item()\n if self.model.model_type==\"ProteinNPT\":\n eval_reconstruction_loss += batch_reconstruction_loss.item()\n eval_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum().item()\n for target_name in self.model.target_names:\n eval_num_masked_targets[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item()\n else:\n num_predicted_targets += len(batch['mutant_mutated_seq_pairs'])\n if output_all_predictions:\n num_of_mutated_seqs_to_score = processed_batch['num_of_mutated_seqs_to_score'] if self.model.model_type==\"ProteinNPT\" else len(processed_batch['mutant_mutated_seq_pairs'])\n for target_name in self.model.target_names:\n output_scores['predictions_'+target_name] += list(output[\"target_predictions\"][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n output_scores['labels_'+target_name] += list(processed_batch['target_labels'][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n if need_head_weights:\n col_attentions.append(output[\"col_attentions\"])\n row_attentions.append(output[\"row_attentions\"])\n\n output_scores = pd.DataFrame.from_dict(output_scores)\n output_scores_numeric_cols = [col_name for col_name in output_scores.columns if col_name not in ['mutant','mutated_sequence']]\n output_scores = output_scores.groupby(['mutant'])[output_scores_numeric_cols].mean().reset_index() \n mutated_seqs_dict = {}\n mutant_mutated_seqs = list(zip(*test_data['mutant_mutated_seq_pairs']))\n mutated_seqs_dict['mutant'] = mutant_mutated_seqs[0]\n mutated_seqs_dict['mutated_sequence'] = mutant_mutated_seqs[1]\n mutated_seqs_df = pd.DataFrame.from_dict(mutated_seqs_dict)\n output_scores = pd.merge(output_scores, mutated_seqs_df, on='mutant', how='left')\n \n\n eval_results = {\n 'eval_total_loss':eval_total_loss,\n 'eval_target_prediction_loss_dict':eval_target_prediction_loss_dict,\n 'output_scores': output_scores\n }\n if need_head_weights:\n print(\"dimension of first attention column {}\".format(col_attentions[0].shape))\n eval_results['col_attentions'] = torch.stack(col_attentions, dim=0).cpu().numpy()\n eval_results['row_attentions'] = torch.stack(row_attentions, dim=0).cpu().numpy()\n \n if self.model.model_type==\"ProteinNPT\":\n eval_results['eval_reconstruction_loss']=eval_reconstruction_loss\n eval_results['eval_num_masked_tokens']=eval_num_masked_tokens\n 
eval_results['eval_num_masked_targets']=eval_num_masked_targets\n else:\n eval_results['eval_num_predicted_targets']=num_predicted_targets\n return eval_results" } ]
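The forward_with_uncertainty methods in both ProteinNPTModel and AugmentedPropertyPredictor above implement the same MC-dropout recipe: switch the model to eval mode, flip only the Dropout modules back to train mode, run several stochastic forward passes, and report the per-target mean as the prediction and the standard deviation as the uncertainty. A minimal standalone sketch of that pattern (model, x, and n_samples are illustrative placeholders, not names from the repository):

import torch
import torch.nn as nn

def mc_dropout_predict(model: nn.Module, x: torch.Tensor, n_samples: int = 10):
    model.eval()  # freeze BatchNorm/LayerNorm statistics and everything else
    for m in model.modules():
        # Same class-name check as in forward_with_uncertainty: only Dropout
        # layers are put back in train mode so they keep sampling random masks.
        if m.__class__.__name__.startswith("Dropout"):
            m.train()
    with torch.no_grad():
        samples = torch.stack([model(x) for _ in range(n_samples)], dim=-1)
    # Mean across samples is the point prediction; std is the uncertainty proxy.
    return samples.mean(dim=-1), samples.std(dim=-1)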
import os, gc
import json
import argparse
import random
import numpy as np
import pandas as pd
import wandb
import torch
import proteinnpt, baselines, utils
from collections import defaultdict
from proteinnpt.model import ProteinNPTModel
from baselines.model import AugmentedPropertyPredictor
from utils.esm.data import Alphabet
from utils.tranception.model_pytorch import get_tranception_tokenizer
from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr
from utils.msa_utils import process_MSA
from utils.model_utils import Trainer
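Both create_optimizer implementations shown in the context use the same three-way parameter grouping: regular weights get args.weight_decay, the pseudo-likelihood / zero-shot-fitness weights get a tiny 1e-8 decay (as in Hsu et al.), and biases plus LayerNorm parameters get none. A hedged sketch of the two-group core of that idea in plain PyTorch (the weight_decay and lr defaults here are illustrative values, not the repository's):

from torch import nn
from torch.optim import AdamW

def build_grouped_adamw(model: nn.Module, weight_decay: float = 0.01, lr: float = 3e-4) -> AdamW:
    # Collect fully-qualified names of parameters living inside LayerNorm modules,
    # mirroring get_parameter_names(self, [nn.LayerNorm]) from utils.model_utils.
    layernorm_params = set()
    for mod_name, mod in model.named_modules():
        if isinstance(mod, nn.LayerNorm):
            for p_name, _ in mod.named_parameters():
                layernorm_params.add(f"{mod_name}.{p_name}" if mod_name else p_name)
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        # Biases and LayerNorm weights are exempt from weight decay.
        (no_decay if (name in layernorm_params or "bias" in name) else decay).append(param)
    return AdamW(
        [
            {"params": decay, "weight_decay": weight_decay},
            {"params": no_decay, "weight_decay": 0.0},
        ],
        lr=lr,
    )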
21137
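pnpt_spearmanr and pnpt_count_non_nan (imported above from utils.data_utils) share one convention: a label is treated as missing when it is NaN or equal to the -100 sentinel, and such positions are dropped before computing rank correlation. A small self-contained version of that masking, with a toy example (the arrays are made up for illustration):

import numpy as np
from scipy.stats import spearmanr

def masked_spearman(pred: np.ndarray, target: np.ndarray) -> float:
    # Missing labels in PNPT are encoded as NaN or the -100 sentinel.
    missing = np.isnan(target) | np.equal(target, -100)
    return spearmanr(pred[~missing], target[~missing])[0]

pred = np.array([0.10, 0.40, 0.35, 0.80, 0.20])
target = np.array([0.0, 0.5, -100.0, 1.0, np.nan])
print(masked_spearman(pred, target))  # rank correlation over the 3 observed pairs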
if args.model_type=="ProteinNPT": normalization = test_eval_results['eval_num_masked_targets'][target_name] test_logs['Test loss '+str(target_name)+' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization with open(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv", "a") as perf_tracker: if os.path.getsize(logs_folder+os.sep+"test_performance_by_fold_"+args.model_name_suffix+".csv") == 0: header="fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name) perf_tracker.write(header+"\n") perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'],5)) for target_name in target_names: perf += ("," + str(round(test_logs['Test loss '+str(target_name)+' per seq.'],5)) +","+str(spearmans[target_name])+","+str(num_obs_spearmans[target_name])) perf_tracker.write(perf+"\n") return test_logs, spearmans def log_performance_all_folds(args,target_names,all_test_predictions_across_folds,spearmans_across_folds,perf_list,logs_folder=None): if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'): os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions') all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds) all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False) if logs_folder is None: dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) logs_folder = dir_path+os.sep+'output' if not os.path.exists(logs_folder): os.mkdir(logs_folder) with open(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv", "a") as overall_perf: if os.path.getsize(logs_folder+os.sep+"test_performance_overall_"+perf_list[2]+".csv") == 0: header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss" for target_name in target_names: header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name) overall_perf.write(header+"\n") perf = ",".join([str(x) for x in perf_list[1:]]) #Remove fold_index from perf_list for target_name in target_names: missing_mask 
= np.isnan(all_test_predictions_across_folds['labels_'+target_name]) | np.equal(all_test_predictions_across_folds['labels_'+target_name],-100) MSE = ((all_test_predictions_across_folds['predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_'+target_name]) MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_'+target_name][~missing_mask] - all_test_predictions_across_folds['labels_'+target_name][~missing_mask])**2).mean() spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_'+target_name], all_test_predictions_across_folds['labels_'+target_name]) spearman_std_dev = np.array(spearmans_across_folds[target_name]).std() perf += ("," + str(MSE) +","+str(spearman) + ","+ str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) +","+str(spearman_standardized)) overall_perf.write(perf+"\n") def main(args): # Set random seeds torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT) target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]] target_names_input = args.target_config.keys() num_targets = len(target_names) num_targets_input = len(target_names_input) print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names))) if num_targets_input > num_targets: print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input))) assay_reference_file = pd.read_csv(args.assay_reference_file_location) assay_id=assay_reference_file["DMS_id"][args.assay_index] args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"]==assay_id].values[0]) print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index)) args.save_model_checkpoint = not args.do_not_save_model_checkpoint args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters if args.model_type=="MSA_Transformer_pred": assert args.num_MSA_sequences_per_training_instance==args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval" effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu print("Effective batch size is {}".format(effective_batch_size)) model_hypers = [args.aa_embeddings,args.target_prediction_model,args.target_prediction_head,args.augmentation,args.frozen_embedding_parameters,args.dropout,args.weight_decay, \ args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu, \ args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers] model_hypers_str = ','.join([str(x) for x in model_hypers]) 
model_name_prefix = '_'.join([str(x) for x in [args.model_type,assay_id,"_".join(target_names_input),args.fold_variable_name,'embed_'+args.aa_embeddings,'head_'+str(args.target_prediction_model),'aug_'+str(args.augmentation_short), \ 'froz_'+str(args.frozen_embedding_parameters),'drop_'+str(args.dropout),'val_'+str(args.use_validation_set),args.model_name_suffix]]) model_name = model_name_prefix + "_fold-" + str(args.test_fold_index) if not os.path.exists(args.model_location+os.sep+model_name): os.mkdir(args.model_location+os.sep+model_name) with open(args.model_location+os.sep+model_name+os.sep+'training_arguments', 'w') as f: json.dump(args.__dict__, f, indent=2) print("Model name: "+model_name) assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay. args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None print("Sequence embeddings: {}".format(args.sequence_embeddings_location)) if args.use_wandb: wandb.login() # Create & initiate model alphabet = get_tranception_tokenizer() if args.aa_embeddings=="Tranception" else Alphabet.from_architecture("msa_transformer") if args.model_type=="ProteinNPT": model = ProteinNPTModel(args, alphabet) elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]: model = AugmentedPropertyPredictor(args, alphabet) if args.frozen_embedding_parameters and args.aa_embeddings in ["MSA_Transformer", "ESM1v", "Tranception"]: for para in model.aa_embedding.parameters(): para.requires_grad = False # List of assays involved in training if num_targets==1: # Single property prediction assay_file_names={ target_names[0]: assay_file_name } if "zero_shot_fitness_predictions" in target_names_input: assay_file_names["zero_shot_fitness_predictions"] = assay_file_name else: # Multiple properties prediction assay_file_names={} for target in target_names_input: if target=="zero_shot_fitness_predictions": assay_file_names[target] = assay_file_name # The name of the zero-shot prediction file matches that of the main assay else: assay_file_names[target] = assay_reference_file[target][assay_reference_file["DMS_id"]==assay_id].values[0] # Load training, val and test data UniProt_id = assay_reference_file["UniProt_ID"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_filename = assay_reference_file["MSA_filename"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_weights_filename = assay_reference_file["weight_file_name"][assay_reference_file["DMS_id"]==assay_id].values[0] MSA_start_position = int(assay_reference_file["MSA_start"][assay_reference_file["DMS_id"]==assay_id].values[0]) MSA_end_position = int(assay_reference_file["MSA_end"][assay_reference_file["DMS_id"]==assay_id].values[0]) train_data, val_data, test_data, target_processing = get_train_val_test_data(args = args, assay_file_names = assay_file_names) MSA_sequences, MSA_weights = process_MSA(args, MSA_filename, MSA_weights_filename) if args.aa_embeddings=="MSA_Transformer" else (None, None) if args.use_wandb: combined_dict = {**vars(args), "parameter_count": sum(p.numel() for p in model.parameters()), "world_size": world_size, "assay_id": assay_id, "UniProt_id": UniProt_id} wandb.init(project=os.getenv("WANDB_PROJECT"), 
config=combined_dict, name=model_name, dir=args.wandb_location, save_code=True) # Define trainer
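cropped_code stops at the '# Define trainer' comment, so this sample's true next line is withheld. Purely as a hedged illustration based on the Trainer.__init__ signature shown in the context above (not the gold continuation), the wiring would plausibly resemble:

trainer = Trainer(
    model=model,
    args=args,
    train_data=train_data,
    val_data=val_data,
    MSA_sequences=MSA_sequences,
    MSA_weights=MSA_weights,
    MSA_start_position=MSA_start_position,
    MSA_end_position=MSA_end_position,
    target_processing=target_processing,
)
trainer_final_status = trainer.train()  # returns the step/epoch/time counters consumed by log_performance_fold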
def setup_config_and_paths(args):
    # All parameters that are not defined by end user are fetched from the config file
    if args.model_config_location is not None:
        args.main_config = json.load(open(args.model_config_location))
        for key in args.main_config:
            if args.__dict__[key] is None:
                args.__dict__[key] = args.main_config[key]
    # File paths config
    for local_path in ['embedding_model_location', 'MSA_data_folder', 'MSA_weight_data_folder', 'path_to_hhfilter']:
        if getattr(args, local_path):
            setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path))
    if not os.path.exists(args.data_location + os.sep + 'model_predictions'):
        os.mkdir(args.data_location + os.sep + 'model_predictions')
    if not os.path.exists(args.data_location + os.sep + 'checkpoint'):
        os.mkdir(args.data_location + os.sep + 'checkpoint')
    args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix
    if not os.path.exists(args.output_scores_location):
        os.mkdir(args.output_scores_location)
    args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix
    if not os.path.exists(args.model_location):
        os.mkdir(args.model_location)
    # Target config
    args.target_config = json.load(open(args.target_config_location))
    zero_shot_predictions_mapping = {
        "MSA_Transformer_pred": "MSA_Transformer_ensemble",
        "ESM1v_pred": "ESM1v_ensemble",
        "TranceptEVE_pred": "TranceptEVE_L",
        "Tranception_pred": "Tranception_L",
        "DeepSequence_pred": "DeepSequence_ensemble"
    }
    if args.model_type == "ProteinNPT":
        zero_shot_predictions_mapping["ProteinNPT"] = zero_shot_predictions_mapping[args.aa_embeddings + "_pred"]
    if args.augmentation == "zero_shot_fitness_predictions_auxiliary_labels":
        # Add auxiliary label to target_config
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced"
        print("Using zero-shot fitness predictions as auxiliary labels")
        args.target_config["zero_shot_fitness_predictions"] = {
            "type": "continuous",
            "dim": 1,
            "var_name": zero_shot_predictions_mapping[args.model_type],  # Select the relevant model for zero-shot fitness predictions
            "location": args.zero_shot_fitness_predictions_location,
            "in_NPT_loss": False,
            "main_target": False
        }
        args.augmentation_short = "auxiliary"
    elif args.augmentation == "zero_shot_fitness_predictions_covariate":
        # Will use zero-shot fitness predictions as an additional model covariate
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced"
        print("Using zero-shot fitness predictions as covariate")
        args.augmentation_short = "covariate"
        args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type]
    else:
        args.augmentation_short = "none"
    for target_index, target in enumerate(args.target_config):
        if "location" not in args.target_config[target].keys():  # Note: the case of zero-shot fitness predictions is already handled above if present
            if args.assay_location is not None:  # We passed at least one path for the assay location
                num_targets = len([x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]])
                if len(args.assay_location) > 1:
                    assert len(args.assay_location) == num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets, len(args.assay_location))
                    args.target_config[target]["location"] = args.assay_location[target_index]
                    print("Location used for target {} is {}".format(target, args.assay_location[target_index]))
                else:
                    args.target_config[target]["location"] = args.assay_location[0]
                    print("Location used for target {} is {}".format(target, args.assay_location[0]))
            else:
                print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles'))
                args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles'
    return args

def log_performance_fold(args, target_names, test_eval_results, trainer_final_status, perf_list, logs_folder=None):
    test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']}
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path + os.sep + 'output'
        if not os.path.exists(logs_folder):
            os.mkdir(logs_folder)
    if args.model_type == "ProteinNPT":
        normalization = 0
        for target_name in target_names:
            normalization += test_eval_results['eval_num_masked_targets'][target_name]
    else:
        normalization = test_eval_results['eval_num_predicted_targets']
    test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
    spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_' + target_name], test_eval_results['output_scores']['labels_' + target_name]) for target_name in target_names}
    num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_' + target_name]) for target_name in target_names}
    for target_name in target_names:
        print("Spearman {} target: {}".format(target_name, spearmans[target_name]))
        test_logs['Test Spearman ' + target_name] = spearmans[target_name]
        if args.model_type == "ProteinNPT":
            normalization = test_eval_results['eval_num_masked_targets'][target_name]
        test_logs['Test loss ' + str(target_name) + ' per seq.'] = test_eval_results['eval_target_prediction_loss_dict'][target_name] / normalization
    with open(logs_folder + os.sep + "test_performance_by_fold_" + args.model_name_suffix + ".csv", "a") as perf_tracker:
        if os.path.getsize(logs_folder + os.sep + "test_performance_by_fold_" + args.model_name_suffix + ".csv") == 0:
            header = "fold_index,model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss"
            for target_name in target_names:
                header += (",loss_" + target_name + ",Spearman_" + target_name + ",num_obs_Spearman_" + target_name)
            perf_tracker.write(header + "\n")
        perf = ",".join([str(x) for x in perf_list]) + "," + str(round(test_logs['Test total loss per seq.'], 5))
        for target_name in target_names:
            perf += ("," + str(round(test_logs['Test loss ' + str(target_name) + ' per seq.'], 5)) + "," + str(spearmans[target_name]) + "," + str(num_obs_spearmans[target_name]))
        perf_tracker.write(perf + "\n")
    return test_logs, spearmans

def log_performance_all_folds(args, target_names, all_test_predictions_across_folds, spearmans_across_folds, perf_list, logs_folder=None):
    if not os.path.exists(args.output_scores_location + os.sep + 'all_aggregated_predictions'):
        os.mkdir(args.output_scores_location + os.sep + 'all_aggregated_predictions')
    all_test_predictions_across_folds = pd.DataFrame.from_dict(all_test_predictions_across_folds)
    all_test_predictions_across_folds.to_csv(args.output_scores_location + os.sep + 'all_aggregated_predictions' + os.sep + model_name_prefix + ".csv", index=False)
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path + os.sep + 'output'
        if not os.path.exists(logs_folder):
            os.mkdir(logs_folder)
    with open(logs_folder + os.sep + "test_performance_overall_" + perf_list[2] + ".csv", "a") as overall_perf:
        if os.path.getsize(logs_folder + os.sep + "test_performance_overall_" + perf_list[2] + ".csv") == 0:
            header = "model_type,model_name_suffix,targets,assay_id,UniProt_id,fold_variable_name,total_training_steps,total_training_epochs,aa_embeddings,target_prediction_model,target_prediction_head,augmentation,frozen_embedding_parameters,dropout,weight_decay,early_stopping_patience,use_validation_set,training_num_assay_sequences_per_batch_per_gpu,eval_num_sequences_to_score_per_batch_per_gpu,eval_num_training_sequences_per_batch_per_gpu,eval_training_sequences_sampling_method,num_MSA_sequences_per_training_instance,embed_dim,ffn_embed_dim,attention_heads,conv_kernel_size,num_protein_npt_layers,total_loss"
            for target_name in target_names:
                header += (",loss_" + target_name + ",Spearman_" + target_name + ",Std_dev_Spearman_" + target_name + ",num_obs_Spearman_" + target_name + ",standardized_loss_" + target_name + ",standardized_Spearman_" + target_name)
            overall_perf.write(header + "\n")
        perf = ",".join([str(x) for x in perf_list[1:]])  # Remove fold_index from perf_list
        for target_name in target_names:
            missing_mask = np.isnan(all_test_predictions_across_folds['labels_' + target_name]) | np.equal(all_test_predictions_across_folds['labels_' + target_name], -100)
            MSE = ((all_test_predictions_across_folds['predictions_' + target_name][~missing_mask] - all_test_predictions_across_folds['labels_' + target_name][~missing_mask]) ** 2).mean()
            spearman = pnpt_spearmanr(all_test_predictions_across_folds['predictions_' + target_name], all_test_predictions_across_folds['labels_' + target_name])
            num_obs_spearman = pnpt_count_non_nan(all_test_predictions_across_folds['labels_' + target_name])
            MSE_standardized = ((all_test_predictions_across_folds['fold_standardized_predictions_' + target_name][~missing_mask] - all_test_predictions_across_folds['labels_' + target_name][~missing_mask]) ** 2).mean()
            spearman_standardized = pnpt_spearmanr(all_test_predictions_across_folds['fold_standardized_predictions_' + target_name], all_test_predictions_across_folds['labels_' + target_name])
            spearman_std_dev = np.array(spearmans_across_folds[target_name]).std()
            perf += ("," + str(MSE) + "," + str(spearman) + "," + str(spearman_std_dev) + "," + str(num_obs_spearman) + "," + str(MSE_standardized) + "," + str(spearman_standardized))
        overall_perf.write(perf + "\n")

def main(args):
    # Set random seeds
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    # target_names are the true targets we want to predict. target_names_input also includes auxiliary labels (as used in ProteinNPT)
    target_names = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]
    target_names_input = args.target_config.keys()
    num_targets = len(target_names)
    num_targets_input = len(target_names_input)
    print("We want to predict {} target(s): {}".format(num_targets, ' and '.join(target_names)))
    if num_targets_input > num_targets:
        print("We leverage {} target(s) and auxiliary labels: {}".format(num_targets_input, ' and '.join(target_names_input)))
    assay_reference_file = pd.read_csv(args.assay_reference_file_location)
    assay_id = assay_reference_file["DMS_id"][args.assay_index]
    args.seq_len = int(assay_reference_file["seq_len"][assay_reference_file["DMS_id"] == assay_id].values[0])
    args.MSA_seq_len = int(assay_reference_file["MSA_len"][assay_reference_file["DMS_id"] == assay_id].values[0])
    print("Training model for assay: {}, where the test_fold index is: {}".format(assay_id, args.test_fold_index))
    args.save_model_checkpoint = not args.do_not_save_model_checkpoint
    args.frozen_embedding_parameters = not args.fine_tune_model_embedding_parameters
    if args.model_type == "MSA_Transformer_pred":
        assert args.num_MSA_sequences_per_training_instance == args.num_MSA_sequences_per_eval_instance, "MSA_Transformer_pred only supports same size of MSA for train and eval"
    effective_batch_size = args.gradient_accumulation * args.training_num_assay_sequences_per_batch_per_gpu
    print("Effective batch size is {}".format(effective_batch_size))
    model_hypers = [args.aa_embeddings, args.target_prediction_model, args.target_prediction_head, args.augmentation, args.frozen_embedding_parameters, args.dropout, args.weight_decay,
                    args.early_stopping_patience, args.use_validation_set, args.training_num_assay_sequences_per_batch_per_gpu, args.eval_num_sequences_to_score_per_batch_per_gpu, args.eval_num_training_sequences_per_batch_per_gpu,
                    args.eval_training_sequences_sampling_method, args.num_MSA_sequences_per_training_instance, args.embed_dim, args.ffn_embed_dim, args.attention_heads, args.conv_kernel_size, args.num_protein_npt_layers]
    model_hypers_str = ','.join([str(x) for x in model_hypers])
    model_name_prefix = '_'.join([str(x) for x in [args.model_type, assay_id, "_".join(target_names_input), args.fold_variable_name, 'embed_' + args.aa_embeddings, 'head_' + str(args.target_prediction_model), 'aug_' + str(args.augmentation_short),
                                                   'froz_' + str(args.frozen_embedding_parameters), 'drop_' + str(args.dropout), 'val_' + str(args.use_validation_set), args.model_name_suffix]])
    model_name = model_name_prefix + "_fold-" + str(args.test_fold_index)
    if not os.path.exists(args.model_location + os.sep + model_name):
        os.mkdir(args.model_location + os.sep + model_name)
    with open(args.model_location + os.sep + model_name + os.sep + 'training_arguments', 'w') as f:
        json.dump(args.__dict__, f, indent=2)
    print("Model name: " + model_name)
    assay_file_name = assay_reference_file["DMS_filename"][assay_reference_file["DMS_id"] == assay_id].values[0]  # File name of main assay used during training (if single property, this is also the only assay). Retrieved embeddings are always for this assay.
    args.sequence_embeddings_location = args.sequence_embeddings_folder + os.sep + assay_file_name.split(".csv")[0] + '.h5' if args.sequence_embeddings_folder else None
    print("Sequence embeddings: {}".format(args.sequence_embeddings_location))
    if args.use_wandb:
        wandb.login()
    # Create & initiate model
    alphabet = get_tranception_tokenizer() if args.aa_embeddings == "Tranception" else Alphabet.from_architecture("msa_transformer")
    if args.model_type == "ProteinNPT":
        model = ProteinNPTModel(args, alphabet)
    elif args.model_type in ["MSA_Transformer_pred", "ESM1v_pred", "Tranception_pred", "TranceptEVE_pred", "Linear_Embedding_pred", "DeepSequence_pred"]:
        model = AugmentedPropertyPredictor(args, alphabet)
    if args.frozen_embedding_parameters and args.aa_embeddings in ["MSA_Transformer", "ESM1v", "Tranception"]:
        for para in model.aa_embedding.parameters():
            para.requires_grad = False
    # List of assays involved in training
    if num_targets == 1:
        # Single property prediction
        assay_file_names = {target_names[0]: assay_file_name}
        if "zero_shot_fitness_predictions" in target_names_input:
            assay_file_names["zero_shot_fitness_predictions"] = assay_file_name
    else:
        # Multiple properties prediction
        assay_file_names = {}
        for target in target_names_input:
            if target == "zero_shot_fitness_predictions":
                assay_file_names[target] = assay_file_name  # The name of the zero-shot prediction file matches that of the main assay
            else:
                assay_file_names[target] = assay_reference_file[target][assay_reference_file["DMS_id"] == assay_id].values[0]
    # Load training, val and test data
    UniProt_id = assay_reference_file["UniProt_ID"][assay_reference_file["DMS_id"] == assay_id].values[0]
    MSA_filename = assay_reference_file["MSA_filename"][assay_reference_file["DMS_id"] == assay_id].values[0]
    MSA_weights_filename = assay_reference_file["weight_file_name"][assay_reference_file["DMS_id"] == assay_id].values[0]
    MSA_start_position = int(assay_reference_file["MSA_start"][assay_reference_file["DMS_id"] == assay_id].values[0])
    MSA_end_position = int(assay_reference_file["MSA_end"][assay_reference_file["DMS_id"] == assay_id].values[0])
    train_data, val_data, test_data, target_processing = get_train_val_test_data(args=args, assay_file_names=assay_file_names)
    MSA_sequences, MSA_weights = process_MSA(args, MSA_filename, MSA_weights_filename) if args.aa_embeddings == "MSA_Transformer" else (None, None)
    if args.use_wandb:
        combined_dict = {**vars(args), "parameter_count": sum(p.numel() for p in model.parameters()), "world_size": world_size, "assay_id": assay_id, "UniProt_id": UniProt_id}
        wandb.init(project=os.getenv("WANDB_PROJECT"), config=combined_dict, name=model_name, dir=args.wandb_location, save_code=True)
    # Define trainer
trainer = Trainer(
9
2023-10-28 11:41:05+00:00
24k
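The fold-aggregation logic above leans on two helpers, pnpt_spearmanr and pnpt_count_non_nan, whose implementations are not shown in this record. Below is a minimal sketch consistent with the NaN / -100 masking convention visible in log_performance_all_folds; the _sketch names are hypothetical stand-ins, not the repo's actual code.

import numpy as np
from scipy.stats import spearmanr

def pnpt_count_non_nan_sketch(labels):
    # Count labels that are observed, i.e. neither NaN nor the -100 missing-value sentinel
    labels = np.asarray(labels, dtype=float)
    return int((~(np.isnan(labels) | np.equal(labels, -100))).sum())

def pnpt_spearmanr_sketch(predictions, labels):
    # Spearman correlation restricted to observed labels, mirroring missing_mask above
    predictions = np.asarray(predictions, dtype=float)
    labels = np.asarray(labels, dtype=float)
    keep = ~(np.isnan(labels) | np.equal(labels, -100))
    return spearmanr(predictions[keep], labels[keep]).correlation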
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n # TensorRT: *.engine\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix = Path(w).suffix.lower()\n suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable suffix\n pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n w = attempt_download(w) # download if not local\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n cuda = torch.cuda.is_available()\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '8.0.0', verbose=True) # version requirement\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n bindings = OrderedDict()\n for index in range(model.num_bindings):\n name = model.get_binding_name(index)\n dtype = trt.nptype(model.get_binding_dtype(index))\n shape = tuple(model.get_binding_shape(index))\n data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n context = model.create_execution_context()\n batch_size = bindings['images'].shape[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n if pb: # 
https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n import tensorflow as tf\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n import tensorflow as tf\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt or self.jit: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n elif self.engine: # TensorRT\n assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = self.bindings['output'].data\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = 
input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y) if isinstance(y, np.ndarray) else y\n return (y, []) if val else y\n\n def warmup(self, imgsz=(1, 3, 640, 640), half=False):\n # Warmup model by running inference once\n if self.pt or self.engine or self.onnx: # warmup types\n if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models\n im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image\n self.forward(im) # warmup" }, { "identifier": "IMG_FORMATS", "path": "utils/datasets.py", "snippet": "IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/datasets.py", "snippet": "VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap, s\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources) as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file, suffix=''):\n # Search/download file (if necessary) and return path\n check_suffix(file, suffix) # optional\n file = str(file) # convert to str()\n if Path(file).is_file() or file == '': # exists\n return file\n elif file.startswith(('http:/', 'https:/')): # download\n url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth\n if Path(file).is_file():\n print(f'Found {url} locally at {file}') # file already exists\n else:\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check\n return file\n else: # search\n files = []\n for d in 'data', 'models', 'utils': # search directories\n files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file\n assert len(files), f'File not found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n print(f\"#305 in utils/general.py - s={s}\")\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. 
runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using 
weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "non_max_suppression_obb", "path": "utils/general.py", "snippet": "def non_max_suppression_obb(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=1500):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results_obb\n Args:\n prediction (tensor): (b, n_all_anchors, [cx cy l s obj num_cls theta_cls])\n agnostic (bool): True = NMS will be applied between elements of different categories\n labels : () or\n\n Returns:\n list of detections, len=batch_size, on (n,7) tensor per image [xylsθ, conf, cls] θ ∈ [-pi/2, pi/2)\n \"\"\"\n\n nc = prediction.shape[2] - 5 - 180 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n class_index = nc + 5\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n max_wh = 4096 # min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 30.0 # seconds to quit after\n # redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [torch.zeros((0, 7), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence, (tensor): (n_conf_thres, [cx cy l s obj num_cls theta_cls])\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:class_index] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n thete_index, theta_pred = torch.max(x[:, class_index:], 1, keepdim=True) # [n_conf_thres, 1] θ ∈ int[0, 179]\n theta_pred = (theta_pred - 90) / 180 * pi # [n_conf_thres, 1] θ ∈ [-pi/2, pi/2)\n\n # Detections matrix nx7 (xyls, θ, conf, cls) θ ∈ [-pi/2, pi/2)\n if multi_label:\n i, j = (x[:, 5:class_index] > conf_thres).nonzero(as_tuple=False).T # ()\n x = torch.cat((x[i, :4], theta_pred[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:class_index].max(1, keepdim=True)\n x = torch.cat((x[:, :4], theta_pred, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 6:7] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # 
excess boxes\n x = x[x[:, 5].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 6:7] * (0 if agnostic else max_wh) # classes\n rboxes = x[:, :5].clone() \n rboxes[:, :2] = rboxes[:, :2] + c # rboxes (offset by class)\n scores = x[:, 5] # scores\n _, i = obb_nms(rboxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "scale_polys", "path": "utils/general.py", "snippet": "def scale_polys(img1_shape, polys, img0_shape, ratio_pad=None):\n # ratio_pad: [(h_raw, w_raw), (hw_ratios, wh_paddings)]\n # Rescale coords (xyxyxyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = resized / raw\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0] # h_ratios\n pad = ratio_pad[1] # wh_paddings\n\n polys[:, [0, 2, 4, 6]] -= pad[0] # x padding\n polys[:, [1, 3, 5, 7]] -= pad[1] # y padding\n polys[:, :8] /= gain # Rescale poly shape to img0_shape\n #clip_polys(polys, img0_shape)\n return polys" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = 
int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output): #list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "poly2rbox", "path": "utils/rboxs_utils.py", "snippet": "def poly2rbox(polys, num_cls_thata=180, radius=6.0, use_pi=False, use_gaussian=False):\n \"\"\"\n Trans poly format to rbox format.\n Args:\n polys (array): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n num_cls_thata (int): [1], theta class num\n radius (float32): [1], window radius for Circular Smooth Label\n use_pi (bool): True θ∈[-pi/2, pi/2) , False θ∈[0, 180)\n\n Returns:\n use_gaussian True:\n rboxes (array): \n csl_labels (array): (num_gts, num_cls_thata)\n elif \n rboxes (array): (num_gts, [cx cy l s θ]) \n \"\"\"\n assert polys.shape[-1] == 8\n if use_gaussian:\n csl_labels = []\n rboxes = []\n for poly in polys:\n poly = np.float32(poly.reshape(4, 2))\n (x, y), (w, h), angle = cv2.minAreaRect(poly) # θ ∈ [0, 90]\n angle = -angle # θ ∈ [-90, 0]\n theta = angle / 180 * pi # 转为pi制\n\n # trans opencv format to longedge format θ ∈ [-pi/2, pi/2]\n if w != max(w, h): \n w, h = h, w\n theta += pi/2\n theta = regular_theta(theta) # limit theta ∈ [-pi/2, pi/2)\n angle = (theta * 180 / pi) + 90 # θ ∈ [0, 180)\n\n if not use_pi: # 采用angle弧度制 θ ∈ [0, 180)\n rboxes.append([x, y, w, h, angle])\n else: # 采用pi制\n rboxes.append([x, y, w, h, theta])\n if use_gaussian:\n csl_label = gaussian_label_cpu(label=angle, num_class=num_cls_thata, u=0, sig=radius)\n csl_labels.append(csl_label)\n if use_gaussian:\n return np.array(rboxes), np.array(csl_labels)\n return np.array(rboxes)" }, { "identifier": "rbox2poly", "path": "utils/rboxs_utils.py", "snippet": "def rbox2poly(obboxes):\n \"\"\"\n Trans rbox format to poly format.\n Args:\n rboxes (array/tensor): (num_gts, [cx cy l s θ]) θ∈[-pi/2, pi/2)\n\n Returns:\n polys (array/tensor): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n \"\"\"\n if isinstance(obboxes, torch.Tensor):\n center, w, h, theta = obboxes[:, :2], obboxes[:, 2:3], obboxes[:, 3:4], obboxes[:, 4:5]\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n\n vector1 = torch.cat(\n (w/2 * Cos, -w/2 * Sin), dim=-1)\n vector2 = torch.cat(\n (-h/2 * Sin, -h/2 * Cos), dim=-1)\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return torch.cat(\n (point1, point2, point3, point4), dim=-1).reshape(*order, 8)\n else:\n center, w, h, theta = np.split(obboxes, (2, 3, 4), axis=-1)\n Cos, Sin = np.cos(theta), np.sin(theta)\n\n vector1 = np.concatenate(\n [w/2 * Cos, -w/2 * Sin], axis=-1)\n vector2 = np.concatenate(\n [-h/2 * Sin, -h/2 * Cos], axis=-1)\n\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 
+ vector2\n order = obboxes.shape[:-1]\n return np.concatenate(\n [point1, point2, point3, point4], axis=-1).reshape(*order, 8)" } ]
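Since the context above includes the full source of both conversions, a quick round trip is easy to sanity-check. The values below are arbitrary and the example assumes the yolov5_obb repo is on PYTHONPATH.

import numpy as np
from utils.rboxs_utils import poly2rbox, rbox2poly

rboxes = np.array([[100.0, 80.0, 60.0, 30.0, 0.3]])  # [cx cy l s θ], θ ∈ [-pi/2, pi/2), l >= s
polys = rbox2poly(rboxes)                            # (1, 8): [x1 y1 x2 y2 x3 y3 x4 y4] corners
recovered = poly2rbox(polys, use_pi=True)            # back to (1, [cx cy l s θ])
print(polys.shape, recovered.shape)

Round-tripping should recover the input up to the long-edge normalisation that poly2rbox applies.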
import argparse import os import sys import cv2 import torch import torch.backends.cudnn as cudnn from pathlib import Path from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync from utils.rboxs_utils import poly2rbox, rbox2poly
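Taken together, the imports above are everything needed for a bare-bones OBB inference pass. The condensed sketch below mirrors the run() loop from the cropped excerpt that follows; the weights path is a placeholder (an OBB-trained checkpoint is assumed), and polygons stay in letterboxed coordinates until scale_polys maps them back.

import torch
from models.common import DetectMultiBackend
from utils.datasets import LoadImages
from utils.general import check_img_size, non_max_suppression_obb, scale_polys
from utils.rboxs_utils import rbox2poly
from utils.torch_utils import select_device

device = select_device('')  # first CUDA device if available, else CPU
model = DetectMultiBackend('path/to/obb_weights.pt', device=device)  # placeholder OBB checkpoint
imgsz = check_img_size([640, 640], s=model.stride)  # enforce stride-multiple image size
dataset = LoadImages('data/images', img_size=imgsz, stride=model.stride, auto=model.pt)
for path, im, im0, vid_cap, s in dataset:
    im = torch.from_numpy(im).to(device).float() / 255  # CHW uint8 -> float32 in [0, 1]
    pred = model(im[None])  # add batch dim
    pred = non_max_suppression_obb(pred, 0.25, 0.45, multi_label=True)
    for det in pred:  # det: (n, [cx cy l s θ, conf, cls])
        polys = rbox2poly(det[:, :5])  # (n, 8) corner coordinates, letterboxed space
        polys = scale_polys(im.shape[1:], polys, im0.shape)  # rescale to the original image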
15,619
        # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)
        pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det)
        dt[2] += time_sync() - t3

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            pred_poly = rbox2poly(det[:, :5])  # (n, [x1 y1 x2 y2 x3 y3 x4 y4])
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            if len(det):
                # Rescale polys from img_size to im0 size
                # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape)
                det = torch.cat((pred_poly, det[:, -2:]), dim=1)  # (n, [poly conf cls])

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *poly, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        poly = poly.tolist()
                        line = (cls, *poly, conf) if save_conf else (cls, *poly)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    if save_img or save_crop or view_img:  # Add poly to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        # annotator.box_label(xyxy, label, color=colors(c, True))
                        annotator.poly_label(poly, label, color=colors(c, True))
                        if save_crop:  # Yolov5-obb doesn't support it yet
                            # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
                            pass

            # Print time (inference-only)
            LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')

            # Stream results
            im0 = annotator.result()
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

    # Print results
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights)  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)')
    parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='3', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='show results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--visualize', action='store_true', help='visualize features')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)')
    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
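With --save-txt, the write-results branch above emits one text line per detection: the class id, the eight polygon corner coordinates, and (with --save-conf) the confidence. A small reader for those files is sketched below; read_obb_labels is a hypothetical helper, not part of the repo.

def read_obb_labels(txt_path, has_conf=False):
    # Parse lines of the form: cls x1 y1 x2 y2 x3 y3 x4 y4 [conf]
    detections = []
    with open(txt_path) as f:
        for row in f:
            vals = [float(v) for v in row.split()]
            cls, poly = int(vals[0]), vals[1:9]
            conf = vals[9] if has_conf else None
            detections.append((cls, poly, conf))
    return detections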
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Run inference on images, videos, directories, streams, etc.

Usage:
    $ python path/to/detect.py --weights yolov5s.pt --source 0  # webcam
                                                    img.jpg  # image
                                                    vid.mp4  # video
                                                    path/  # directory
                                                    path/*.jpg  # glob
                                                    'https://youtu.be/Zgi9g1ksQHc'  # YouTube
                                                    'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative


@torch.no_grad()
def run(weights=ROOT / 'yolov5s.pt',  # model.pt path(s)
        source=ROOT / 'data/images',  # file/dir/URL/glob, 0 for webcam
        imgsz=(640, 640),  # inference size (height, width)
        conf_thres=0.25,  # confidence threshold
        iou_thres=0.45,  # NMS IOU threshold
        max_det=1000,  # maximum detections per image
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        view_img=False,  # show results
        save_txt=False,  # save results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_crop=False,  # save cropped prediction boxes
        nosave=False,  # do not save images/videos
        classes=None,  # filter by class: --class 0, or --class 0 2 3
        agnostic_nms=False,  # class-agnostic NMS
        augment=False,  # augmented inference
        visualize=False,  # visualize features
        update=False,  # update all models
        project=ROOT / 'runs/detect',  # save results to project/name
        name='exp',  # save results to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        line_thickness=3,  # bounding box thickness (pixels)
        hide_labels=False,  # hide labels
        hide_conf=False,  # hide confidences
        half=False,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        ):
    source = str(source)
    save_img = not nosave and not source.endswith('.txt')  # save inference images
    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
    webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
    if is_url and is_file:
        source = check_file(source)  # download

    # Directories
    save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Load model
    device = select_device(device)
    model = DetectMultiBackend(weights, device=device, dnn=dnn)
    stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine
    imgsz = check_img_size(imgsz, s=stride)  # check image size

    # Half
    half &= (pt or jit or engine) and device.type != 'cpu'  # half precision only supported by PyTorch on CUDA
    if pt or jit:
        model.model.half() if half else model.model.float()

    # Dataloader
    if webcam:
        view_img = check_imshow()
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt)
        bs = len(dataset)  # batch_size
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
        bs = 1  # batch_size
    vid_path, vid_writer = [None] * bs, [None] * bs

    # Run inference
    model.warmup(imgsz=(1, 3, *imgsz), half=half)  # warmup
    dt, seen = [0.0, 0.0, 0.0], 0
    for path, im, im0s, vid_cap, s in dataset:
        t1 = time_sync()
        im = torch.from_numpy(im).to(device)
        im = im.half() if half else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        t2 = time_sync()
        dt[0] += t2 - t1

        # Inference
        visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
        pred = model(im, augment=augment, visualize=visualize)
        t3 = time_sync()
        dt[1] += t3 - t2

        # NMS
        # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)
        pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det)
        dt[2] += time_sync() - t3

        # Second-stage classifier (optional)
        # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)

        # Process predictions
        for i, det in enumerate(pred):  # per image
            pred_poly = rbox2poly(det[:, :5])  # (n, [x1 y1 x2 y2 x3 y3 x4 y4])
            seen += 1
            if webcam:  # batch_size >= 1
                p, im0, frame = path[i], im0s[i].copy(), dataset.count
                s += f'{i}: '
            else:
                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # im.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
            s += '%gx%g ' % im.shape[2:]  # print string
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            imc = im0.copy() if save_crop else im0  # for save_crop
            annotator = Annotator(im0, line_width=line_thickness, example=str(names))
            if len(det):
                # Rescale polys from img_size to im0 size
                # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
                pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape)
                det = torch.cat((pred_poly, det[:, -2:]), dim=1)  # (n, [poly conf cls])

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *poly, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        poly = poly.tolist()
                        line = (cls, *poly, conf) if save_conf else (cls, *poly)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
                    if save_img or save_crop or view_img:  # Add poly to image
                        c = int(cls)  # integer class
                        label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
                        # annotator.box_label(xyxy, label, color=colors(c, True))
                        annotator.poly_label(poly, label, color=colors(c, True))
                        if save_crop:  # Yolov5-obb doesn't support it yet
                            # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
                            pass

            # Print time (inference-only)
            LOGGER.info(f'{s}Done. ({t3 - t2:.3f}s)')

            # Stream results
            im0 = annotator.result()
            if view_img:
                cv2.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                else:  # 'video' or 'stream'
                    if vid_path[i] != save_path:  # new video
                        vid_path[i] = save_path
                        if isinstance(vid_writer[i], cv2.VideoWriter):
                            vid_writer[i].release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer[i].write(im0)

    # Print results
    t = tuple(x / seen * 1E3 for x in dt)  # speeds per image
    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
    if update:
        strip_optimizer(weights)  # update model (to fix SourceChangeWarning)


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs/train/yolov5n_DroneVehicle/weights/best.pt', help='model path(s)')
    parser.add_argument('--source', type=str, default='/media/test/4d846cae-2315-4928-8d1b-ca6d3a61a3c6/DroneVehicle/val/raw/images/', help='file/dir/URL/glob, 0 for webcam')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[840], help='inference size h,w')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.2, help='NMS IoU threshold')
    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
    parser.add_argument('--device', default='3', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default='runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(FILE.stem, opt)
14
2023-10-31 06:06:41+00:00
24k
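The detection loop in the record above decodes each oriented-box prediction [x, y, l, s, θ] (θ ∈ [-π/2, π/2), per the inline comment) into four polygon corners with rbox2poly before rescaling them to the original image and drawing them. Below is a minimal NumPy sketch of what such a conversion does, assuming [cx, cy, l, s, θ] with l the long side and s the short side; the helper name and exact corner layout are illustrative, not the repository's actual implementation.

import numpy as np

def rbox_to_poly(rboxes: np.ndarray) -> np.ndarray:
    # Hypothetical sketch: (n, [cx, cy, l, s, theta]) -> (n, [x1 y1 x2 y2 x3 y3 x4 y4]),
    # mirroring the comment `pred: list*(n, [xylsθ, conf, cls])` in the record above.
    cx, cy, l, s, theta = (rboxes[:, i] for i in range(5))
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # half-extent vectors along the rotated long and short axes
    dx_l, dy_l = (l / 2) * cos_t, (l / 2) * sin_t
    dx_s, dy_s = -(s / 2) * sin_t, (s / 2) * cos_t
    x1, y1 = cx - dx_l - dx_s, cy - dy_l - dy_s
    x2, y2 = cx + dx_l - dx_s, cy + dy_l - dy_s
    x3, y3 = cx + dx_l + dx_s, cy + dy_l + dy_s
    x4, y4 = cx - dx_l + dx_s, cy - dy_l + dy_s
    return np.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=1)

# axis-aligned 4x2 box at the origin: corners at (+-2, +-1)
print(rbox_to_poly(np.array([[0.0, 0.0, 4.0, 2.0, 0.0]])))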
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @abstractmethod\n def generate_random_key(self) -> int:\n pass\n\n @abstractmethod\n def encrypt(\n self, plaintext: int, random_key: Union[Optional[int], Optional[list]] = None\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def decrypt(self, ciphertext: Union[int, tuple, list]) -> int:\n pass\n\n @abstractmethod\n def add(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def multiply(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple]:\n pass\n\n @abstractmethod\n def xor(self, ciphertext1: list, ciphertext2: list) -> list:\n pass\n\n @abstractmethod\n def multiply_by_contant(self, ciphertext: Union[int, tuple, list], constant: int) -> int:\n pass\n\n @abstractmethod\n def reencrypt(self, ciphertext: Union[int, tuple, list]) -> Union[int, tuple, list]:\n pass" }, { "identifier": "Algorithm", "path": "lightphe/models/Algorithm.py", "snippet": "class Algorithm:\n RSA = \"RSA\"\n ElGamal = \"ElGamal\"\n ExponentialElGamal = \"Exponential-ElGamal\"\n EllipticCurveElGamal = \"EllipticCurve-ElGamal\"\n Paillier = \"Paillier\"\n DamgardJurik = \"Damgard-Jurik\"\n OkamotoUchiyama = \"Okamoto-Uchiyama\"\n Benaloh = \"Benaloh\"\n NaccacheStern = \"Naccache-Stern\"\n GoldwasserMicali = \"Goldwasser-Micali\"" }, { "identifier": "RSA", "path": "lightphe/cryptosystems/RSA.py", "snippet": "class RSA(Homomorphic):\n \"\"\"\n RSA algorithm is partially homomorphic with respect to the multiplication\n Ref: https://sefiks.com/2023/03/06/a-step-by-step-partially-homomorphic-encryption-example-with-rsa-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 1024, encrypt_with_public=True):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n encrypt_with_public (boolean): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.encrypt_with_public = encrypt_with_public\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of RSA cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n while True:\n try:\n # picking a prime modulus p and q\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n # select public exponent e\n while True:\n e = random.randint(1, phi - 1)\n if math.gcd(e, n) == 1:\n break\n\n d = pow(e, -1, phi)\n break\n except:\n pass\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"e\"] = e\n keys[\"private_key\"][\"d\"] = d\n return keys\n\n def generate_random_key(self) -> int:\n pass\n\n def encrypt(self, plaintext: int) -> int:\n \"\"\"\n Encrypt plain messages with RSA\n Args:\n plaintext (int): plain message\n Returns:\n ciphertext (int): ciphertext encrypted with RSA\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if plaintext > n:\n plaintext = plaintext % n\n logger.debug(\n f\"RSA can encrypt messages [1, {n}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n if self.encrypt_with_public is True:\n e = self.keys[\"public_key\"][\"e\"]\n c = pow(plaintext, e, n)\n else:\n d = self.keys[\"private_key\"][\"d\"]\n c = pow(plaintext, d, n)\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt ciphertexts with RSA\n Args:\n ciphertext (int): encrypted message\n decrypt_with_private (int): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if self.encrypt_with_public is True:\n d = self.keys[\"private_key\"][\"d\"]\n p = pow(ciphertext, d, n)\n else:\n e = self.keys[\"public_key\"][\"e\"]\n p = pow(ciphertext, e, n)\n\n return p\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic multiplication on encrypted data.\n Result of this must be equal to E(m1 * m2)\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the addition\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n raise ValueError(\"RSA is not supporting multiplying ciphertext by a known constant\")\n\n def reencrypt(self, ciphertext: int) -> int:\n raise ValueError(\"RSA does not support re-encryption\")" }, { "identifier": "ElGamal", "path": "lightphe/cryptosystems/ElGamal.py", "snippet": "class ElGamal(Homomorphic):\n \"\"\"\n ElGamal algorithm is either multiplicatively or additively homomorphic\n Ref: https://sefiks.com/2023/03/27/a-step-by-step-partially-homomorphic-encryption-example-with-elgamal-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, exponential=False, key_size: int = 1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n exponential (boolean): set this to True to make cryptosystem exponential ElGamal.\n Regular ElGamal is homomorphic with respect to the multiplication whereas\n exponential ElGamal is homomorphic with respect to the addition\n \"\"\"\n self.exponential = exponential\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"p\"]\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(100, 2 ** int(key_size / 2) - 1)\n\n # picking a generator g\n g = random.randint(2, int(math.sqrt(p)))\n\n # picking a private key x\n x = random.randint(1, p - 2)\n\n # public key\n y = pow(g, x, p)\n\n keys[\"public_key\"] = {\n \"p\": p,\n \"g\": g,\n \"y\": y,\n }\n\n keys[\"private_key\"] = {\"x\": x}\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n return random.randint(1, p - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. 
Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n y = self.keys[\"public_key\"][\"y\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = plaintext % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n c1 = pow(g, r, p)\n if self.exponential is False:\n c2 = (plaintext * pow(y, r, p)) % p\n else:\n c2 = (pow(g, plaintext, p) * pow(y, r, p)) % p\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n c1, c2 = ciphertext\n\n x = self.keys[\"private_key\"][\"x\"]\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n m_prime = (c2 * pow(c1, -1 * x, p)) % p\n\n if self.exponential is False:\n return m_prime\n\n if self.exponential is True:\n # m_prime = g^m . Find m for known m_prime and known g (DLP).\n m = 0\n while True:\n if pow(g, m, p) == m_prime:\n return m\n m += 1\n if m > p:\n raise ValueError(f\"Cannot restore the message in [0, {p}]\")\n\n return -1\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic multiplication on encrypted data\n Result of this must be equal to E(m1 * m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is True:\n raise ValueError(\"Exponential ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is False:\n raise ValueError(\"Regular ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise ValueError(\"ElGamal is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n if self.exponential is False:\n raise ValueError(\"ElGamal is not supporting multiplying ciphertext by a known constant\")\n p = self.keys[\"public_key\"][\"p\"]\n if constant > p:\n constant = constant % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext[0], constant, p), pow(ciphertext[1], constant, p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n \"\"\"\n Re-generate ciphertext with re-encryption. 
Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.exponential is True:\n # then this is additively homomorphic\n neutral_element = 0\n else:\n # then this is multiplicatively homomorphic\n neutral_element = 1\n\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n\n if self.exponential is True:\n reencrypted_value = self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n else:\n reencrypted_value = self.multiply(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n return reencrypted_value" }, { "identifier": "Paillier", "path": "lightphe/cryptosystems/Paillier.py", "snippet": "class Paillier(Homomorphic):\n \"\"\"\n Paillier algorithm is homomorphic with respect to the addition.\n Also, it supports power operation for ciphertext base and plaintext exponent\n Ref: https://sefiks.com/2023/04/03/a-step-by-step-partially-homomorphic-encryption-example-with-paillier-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = n * n\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n assert math.gcd(r, n) == 1\n return (pow(g, plaintext, n * n) * pow(r, n, n * n)) % (n * n)\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n mu = pow(phi, -1, n)\n\n return (self.lx(pow(ciphertext, phi, n * n)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 
+ m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % (n * n)\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * m2) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Paillier\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Paillier can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, n * n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "DamgardJurik", "path": "lightphe/cryptosystems/DamgardJurik.py", "snippet": "class DamgardJurik(Homomorphic):\n \"\"\"\n Damgard-Jurik algorithm is a generalization of Paillier.\n It is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-damgard-jurik-in-python/\n \"\"\"\n\n def __init__(self, s: int = 2, keys: Optional[dict] = None, key_size: int = 1024):\n \"\"\"\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). if s == 1 then this is Paillier\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size=key_size, s=s)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = pow(n, s + 1)\n\n def generate_keys(self, key_size: int, s: Optional[int] = None):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). 
if s == 1 then this is Paillier\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"s\"] = s\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n r = random_key or self.generate_random_key()\n modulo = pow(n, s + 1)\n\n # assert math.gcd(r, n) == 1\n c = (pow(g, plaintext, modulo) * pow(r, n, modulo)) % modulo\n # c = (pow(g, plaintext, modulo) * pow(r, pow(n, s), modulo)) % modulo\n if math.gcd(c, modulo) != 1:\n logger.info(f\"WARNING! gcd({c=}, {modulo=}) != 1\")\n return c\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n mu = pow(phi, -1, n)\n modulo = pow(n, s + 1)\n return (self.lx(pow(ciphertext, phi, modulo)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n modulo = pow(n, s + 1)\n return (ciphertext1 * ciphertext2) % modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext by a known plain constant\n Result of this must be equal to E(m1 * m2), where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Damgard-Jurik\n constant (int): a known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Damgard-Jurik\n \"\"\"\n n = 
self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Damgard-Jurik can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "OkamotoUchiyama", "path": "lightphe/cryptosystems/OkamotoUchiyama.py", "snippet": "class OkamotoUchiyama(Homomorphic):\n \"\"\"\n Okamoto-Uchiyama algorithm is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-okamoto-uchiyama-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"private_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of OkamotoUchiyama cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # modulo\n n = p * p * q\n\n # generator\n g = random.randint(2, n)\n\n if pow(g, p - 1, p * p) == 1:\n raise ValueError(\"Fermat's Little Theorem must be satisfied\")\n\n h = pow(g, n, n)\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"h\"] = h\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Okamoto-Uchiyama requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with OkamotoUchiyama\n Args:\n plaintext (int): message to encrypt\n random_key (int): OkamotoUchiyama requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n h = self.keys[\"public_key\"][\"h\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = 
plaintext % p\n logger.debug(\n f\"plaintext must be in scale [0, {p=}] but this is exceeded.\"\n \"New plaintext is {plaintext}\"\n )\n return (pow(g, plaintext, n) * pow(h, r, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Okamoto-Uchiyama\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n a = self.lx(pow(ciphertext, p - 1, p * p))\n b = self.lx(pow(g, p - 1, p * p))\n return (a * pow(b, -1, p)) % p\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with OkamotoUchiyama\n ciphertext2 (int): 2nd ciphertext created with OkamotoUchiyama\n Returns:\n ciphertext3 (int): 3rd ciphertext created with OkamotoUchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Okamoto-Uchiyama\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Okamoto-Uchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Okamoto-Uchiyama can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / p\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n if x % p != 1:\n raise ValueError(f\"Input passed to lx ({x}) must be identical to 1 in modulo {p}\")\n if math.gcd(x, p * p) != 1:\n raise ValueError(f\"gcd({x}, {p}^2) must be equal to 1\")\n y = (x - 1) // p\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "Benaloh", "path": "lightphe/cryptosystems/Benaloh.py", "snippet": "class Benaloh(Homomorphic):\n def __init__(self, keys: Optional[dict] = None, key_size: int = 50):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. 
default is less than other cryptosystems\n because decryption of Benaloh requires to solve DLP :/\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"r\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n x = 1\n while x == 1:\n # picking a prime p\n p = sympy.randprime(200, 2**key_size)\n\n # picking a prime q\n q = sympy.randprime(100, p)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n r = p - 1\n while gcd(q - 1, r) != 1:\n r = int(r / gcd(q - 1, r))\n\n if not (\n # r should divide p-1 without remainder\n (p - 1) % r == 0\n # r and (p - 1) / r must be coprimes\n and gcd(r, int((p - 1) / r)) == 1\n # r and q-1 must be coprimes\n and gcd(r, q - 1) == 1\n ):\n continue\n\n y = random.randint(2, n)\n if gcd(y, n) != 1:\n continue\n\n # to guarantee correct decryption\n prime_factors = sympy.factorint(r).keys()\n decryption_guaranteed = True\n for prime_factor in prime_factors:\n # none of r's prime factor should satisfy the condition\n if pow(y, int(phi / prime_factor), n) == 1:\n decryption_guaranteed = False\n\n if decryption_guaranteed is False:\n continue\n\n x = pow(y, int(phi / r), n)\n if x != 1:\n break\n\n keys[\"public_key\"][\"y\"] = y\n keys[\"public_key\"][\"r\"] = r\n keys[\"public_key\"][\"n\"] = n\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"x\"] = x\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Generate random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n u = random.randint(1, n)\n if gcd(u, n) == 1:\n break\n return u\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Benaloh\n Args:\n plaintext (int): message to encrypt\n random_key (int): Benaloh requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n y = self.keys[\"public_key\"][\"y\"]\n r = self.keys[\"public_key\"][\"r\"]\n n = self.keys[\"public_key\"][\"n\"]\n\n u = random_key or self.generate_random_key()\n\n if plaintext > r:\n plaintext = plaintext % r\n logger.debug(\n f\"Benaloh lets you to encrypt messages in [0, {r=}].\"\n f\"But your plaintext exceeds this limit.\"\n f\"New plaintext is {plaintext}\"\n )\n\n c = (pow(y, plaintext, n) * pow(u, r, n)) % n\n\n if gcd(c, n) != 1:\n logger.debug(\"ciphertext is not co-prime with n!\")\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt a given ciphertext with Benaloh\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n r = self.keys[\"public_key\"][\"r\"]\n phi = self.keys[\"private_key\"][\"phi\"]\n x = self.keys[\"private_key\"][\"x\"]\n\n a = pow(ciphertext, int(phi / r), n)\n\n md = 0\n while True:\n if pow(x, md, n) == a:\n break\n md = md + 1\n if md > r:\n raise ValueError(f\"Message cannot be restored in [{0}, {n}]\")\n return md\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic 
addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Benaloh\n ciphertext2 (int): 2nd ciphertext created with Benaloh\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Benaloh\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Benaloh\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Benaloh\n \"\"\"\n # raise ValueError(\"Benaloh is not supporting multiplying by a constant\")\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Benaloh can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "NaccacheStern", "path": "lightphe/cryptosystems/NaccacheStern.py", "snippet": "class NaccacheStern(Homomorphic):\n \"\"\"\n Naccache-Stern algorithm is homomorphic with respect to the addition.\n It is a generaliation of Benaloh cryptosystem\n Ref: https://sefiks.com/2023/10/26/a-step-by-step-partially-homomorphic-encryption-example-with-naccache-stern-in-python/\n Original paper: https://dl.acm.org/doi/pdf/10.1145/288090.288106\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=37, deterministic: bool = False):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. Less than many cryptosystems because\n decryption requires to solve DLP.\n deterministic (boolean): deterministic or probabilistic version of\n cryptosystem\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"sigma\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.deterministic = deterministic\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Naccache-Stern cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # pick a family of small primes. 
the largest one is 10-bits\n # TODO: do something generic instead of constant primes\n prime_set = [3, 5, 7, 11, 13, 17]\n k = len(prime_set)\n\n if all(sympy.isprime(prime) is True for prime in prime_set) is False:\n raise ValueError(\"All items of prime set must be prime!\")\n\n # divide the set in half and find products of primes\n u = 1\n v = 1\n\n for i, prime in enumerate(prime_set):\n if i < len(prime_set) / 2:\n u = u * prime\n else:\n v = v * prime\n\n # product of all primes\n sigma = u * v\n\n # pick large prime numbers\n while True:\n a = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n b = sympy.randprime(100, a)\n\n # calculate two primes from chosen ones\n p = (2 * a * u) + 1\n q = (2 * b * v) + 1\n\n # recommended n is 768 bits\n n = p * q\n phi = (p - 1) * (q - 1)\n\n if phi % sigma != 0:\n logger.debug(\"canceled because phi cannot be divisible by sigma\")\n continue\n\n if math.gcd(sigma, int(phi // sigma)) != 1:\n logger.debug(\"canceled because sigma and phi/sigma are not coprime\")\n continue\n\n p_conditions = []\n for i in range(0, int(k / 2)):\n pi = prime_set[i]\n if (\n (p - 1) % pi == 0\n and math.gcd(pi, int((p - 1) / pi)) == 1\n and math.gcd(pi, q - 1) == 1\n ):\n p_conditions.append(1)\n else:\n p_conditions.append(0)\n p_satisfied = True if len(p_conditions) == sum(p_conditions) else False\n if p_satisfied is False:\n logger.debug(\"canceled because p_conditions are not satisfied\")\n continue\n\n q_conditions = []\n for i in range(int(k / 2), k):\n pi = prime_set[i]\n if (\n (q - 1) % pi == 0\n and math.gcd(pi, int((q - 1) / pi)) == 1\n and math.gcd(pi, p - 1)\n ):\n q_conditions.append(1)\n else:\n q_conditions.append(0)\n\n q_satisfied = True if len(q_conditions) == sum(q_conditions) else False\n if q_satisfied is False:\n logger.debug(\"canceled because q_conditions are not satisfied\")\n continue\n\n # p and q must be primes\n if not (sympy.isprime(p) and sympy.isprime(q)):\n continue\n\n # choose a generator g\n g = random.randint(2, n)\n # it must be co-prime to n\n if math.gcd(g, n) != 1:\n logger.debug(\"canceled becuase g is not co-prime with ne\")\n continue\n # guarantee it is not pi-th power.\n for pi in prime_set:\n logger.debug(\"canceled because g is a pi-th power\")\n if pow(g, int(phi / pi), n) == 1:\n continue\n\n # the order of g modulo n must be phi/4\n if pow(g, int(phi / 4), n) != 1:\n continue\n\n # check decryption is guaranteed similar to benaloh\n # ps: this is not mentioned in the original paper\n is_decryption_guaranteed = True\n for pi in prime_set:\n prime_factors = sympy.factorint(pi).keys()\n for prime_factor in prime_factors:\n if pow(g, int(phi / prime_factor), n) == 1:\n is_decryption_guaranteed = False\n if is_decryption_guaranteed is True:\n break\n\n logger.debug(f\"n bits is {len(bin(n)[2:])}\")\n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n # sigma can optionally be secret in deterministic version\n keys[\"public_key\"][\"sigma\"] = sigma\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"prime_set\"] = prime_set\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Naccache-Stern requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given 
plaintext for optionally given random key with Naccache-Stern\n Args:\n plaintext (int): message to encrypt\n random_key (int): Naccache-Stern requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n sigma = self.keys[\"public_key\"][\"sigma\"]\n if plaintext > self.plaintext_modulo:\n plaintext = plaintext % self.plaintext_modulo\n logger.debug(\n f\"plaintext must be in scale [0, {self.plaintext_modulo}] \"\n \"but this is exceeded. New plaintext is {plaintext}\"\n )\n\n if self.deterministic is True:\n return pow(g, plaintext, n)\n\n # Probabilistic\n return (pow(r, sigma, n) * pow(g, plaintext, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Naccache-Stern\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n g = self.keys[\"public_key\"][\"g\"]\n prime_set = self.keys[\"private_key\"][\"prime_set\"]\n\n remainders = []\n for i, prime in enumerate(prime_set):\n ci = pow(ciphertext, int(phi / prime), n)\n logger.debug(f\"c_{i} = {ci}\")\n\n j = 0\n while True:\n if ci == pow(g, int((j * phi) / prime), n):\n logger.debug(f\"m_{i} = {j}\")\n remainders.append(j)\n break\n j = j + 1\n if j > prime**2:\n raise ValueError(\n f\"c_{i} cannot be restored from {ci} = {g}^(j*{phi}/{prime}) mod {n}\"\n )\n\n congruences = []\n for i in range(0, len(prime_set)):\n logger.debug(f\"m mod {prime_set[i]} = {remainders[i]}\")\n congruences.append((remainders[i], prime_set[i]))\n\n # chinese remainder problem\n ms = solve_congruence(*congruences)\n if not ms:\n raise ValueError(\"message cannot be restored with Chinese Remainder!\")\n return ms[0]\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Naccache-Stern\n ciphertext2 (int): 2nd ciphertext created with Naccache-Stern\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Naccache-Stern\n \"\"\"\n return (ciphertext1 * ciphertext2) % self.ciphertext_modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Naccache-Stern\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Naccache-Stern\n \"\"\"\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Naccache-Stern can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. 
New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.deterministic is True:\n raise ValueError(\n \"Deterministic version of Naccache-Stern does not support reencryption.\"\n \"If you still want to perform ciphertext regeneration, then you may \"\n \"consider to use its probabilistic version.\"\n )\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "GoldwasserMicali", "path": "lightphe/cryptosystems/GoldwasserMicali.py", "snippet": "class GoldwasserMicali(Homomorphic):\n \"\"\"\n Goldwasser-Micali algorithm is homomorphic with respect to the Exclusively OR (XOR).\n Ref: https://sefiks.com/2023/10/27/a-step-by-step-partially-homomorphic-encryption-example-with-goldwasser-micali-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=100):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n # TODO: not sure about the plaintext modulo\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Goldwasser-Micali cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n\n # find non-residue x\n while True:\n x = random.randint(1, n - 1)\n if math.gcd(x, n) == 1 and jacobi_symbol(x, p) == -1 and jacobi_symbol(x, q) == -1:\n break\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"x\"] = x\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Goldwasser-Micali requires to generate one-time random key that co-prime to n\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(1, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> list:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Goldwasser-Micali\n Args:\n plaintext (int): message to encrypt\n random_key (int): Goldwasser-Micali requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n x = self.keys[\"public_key\"][\"x\"]\n\n m_binary = bin(plaintext)[2:]\n\n # number of bits\n k = len(m_binary)\n\n if random_key and len(random_key) != k:\n raise ValueError(f\"Random key must be length of {k}\")\n\n c = []\n for i in range(0, k):\n mi = int(m_binary[i])\n\n if random_key:\n ri = random_key[i]\n else:\n ri = 
self.generate_random_key()\n\n ci = (pow(ri, 2, n) * pow(x, mi, n)) % n\n c.append(ci)\n\n return c\n\n def decrypt(self, ciphertext: list) -> int:\n \"\"\"\n Decrypt a given ciphertext with Goldwasser-Micali\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n m_binaries = []\n\n p = self.keys[\"private_key\"][\"p\"]\n q = self.keys[\"private_key\"][\"q\"]\n\n for i in ciphertext:\n xp = i % p\n xq = i % q\n\n if pow(xp, int((p - 1) / 2), p) == 1 and pow(xq, int((q - 1) / 2), q) == 1:\n m_binaries.append(\"0\")\n else:\n m_binaries.append(\"1\")\n\n m_binary = \"\".join(m_binaries)\n return int(m_binary, 2)\n\n def add(self, ciphertext1: list, ciphertext2: list) -> list:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the addition\")\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> list:\n \"\"\"\n Perform homomorphic xor on encrypted data.\n Result of this must be equal to E(m1 ^ m2) = E(m1) ^ E(m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Goldwasser-Micali\n ciphertext2 (int): 2nd ciphertext created with Goldwasser-Micali\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Goldwasser-Micali\n \"\"\"\n ciphertext3 = []\n for i in range(0, len(ciphertext1)):\n c1 = ciphertext1[i]\n c2 = ciphertext2[i]\n ciphertext3.append((c1 * c2) % self.ciphertext_modulo)\n\n return ciphertext3\n\n def multiply_by_contant(self, ciphertext: int, constant: int):\n raise ValueError(\"Goldwasser-Micali does not support multiplying with constant\")\n\n def reencrypt(self, ciphertext: int):\n raise ValueError(\"Goldwasser-Micali does not support re-encryption\")" }, { "identifier": "EllipticCurveElGamal", "path": "lightphe/cryptosystems/EllipticCurveElGamal.py", "snippet": "class EllipticCurveElGamal(Homomorphic):\n \"\"\"\n Elliptic Curve ElGamal algorithm is an additively homomorphic algorithm\n Unluckily, it requires to solve (EC)DLP to restore plaintext in decryption\n However it is easy to restore plaintext while plaintext is not very large\n unsimilar to Benaloh or Naccache-Stern\n Ref: https://sefiks.com/2018/08/21/elliptic-curve-elgamal-encryption/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 160):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. default is 160.\n this is equivalent to 1024 bit RSA.\n \"\"\"\n # TODO: add different forms and curves. e.g. 
Koblitz, Edwards (Ed25519)\n self.curve = Weierstrass()\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.curve.p\n self.ciphertext_modulo = self.curve.p\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Elliptic Curve ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # private key\n ka = random.getrandbits(key_size)\n\n # public key\n Qa = self.curve.apply_double_and_add_method(G=self.curve.G, k=ka, p=self.curve.p)\n\n keys[\"public_key\"][\"Qa\"] = Qa\n keys[\"private_key\"][\"ka\"] = ka\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Elliptic Curve ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n return random.getrandbits(128)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with Elliptic Curve ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n # modulo\n p = self.curve.p\n\n # base point\n G = self.curve.G\n\n # public key\n Qa = self.keys[\"public_key\"][\"Qa\"]\n\n # random key\n r = random_key or self.generate_random_key()\n\n s = self.curve.apply_double_and_add_method(G=G, k=plaintext, p=p)\n\n c1 = self.curve.apply_double_and_add_method(G=G, k=r, p=p)\n\n c2 = self.curve.apply_double_and_add_method(G=Qa, k=r, p=p)\n c2 = self.curve.add_points(c2, s, p)\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with Elliptic Curve ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n # modulo\n p = self.curve.p\n\n # private key\n ka = self.keys[\"private_key\"][\"ka\"]\n\n c1, c2 = ciphertext\n c1_prime = (c1[0], (-1 * c1[1]) % p)\n s_prime = self.curve.apply_double_and_add_method(G=c1_prime, k=ka, p=p)\n s_prime = self.curve.add_points(P=c2, Q=s_prime, p=p)\n\n # s_prime is a point on the elliptic curve\n # s_prime = k x G\n # we need to find k from known s_prime and G\n # this requires to solve ECDLP\n\n # base point\n G = self.curve.G\n k = 2\n while True:\n G = self.curve.add_points(P=G, Q=self.curve.G, p=p)\n if G[0] == s_prime[0] and G[1] == s_prime[1]:\n return k\n k = k + 1\n if k > self.curve.n:\n raise ValueError(f\"Cannot restore scalar from {s_prime} = k x {self.curve.G}\")\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n raise ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the multiplication\"\n )\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n a = self.curve.add_points(P=ciphertext1[0], Q=ciphertext2[0], p=self.curve.p)\n b = self.curve.add_points(P=ciphertext1[1], Q=ciphertext2[1], p=self.curve.p)\n return a, b\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise 
ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the exclusive or\"\n )\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to k x E(m1) = E(m1 * k)\n where E(m1) = ciphertext\n Args:\n ciphertext (tuple): ciphertext created with Elliptic Curve ElGamal\n constant (int): known plain constant\n Returns:\n ciphertext (tuple): new ciphertext created with Elliptic Curve ElGamal\n \"\"\"\n return self.curve.apply_double_and_add_method(\n G=ciphertext[0], k=constant, p=self.curve.p\n ), self.curve.apply_double_and_add_method(G=ciphertext[1], k=constant, p=self.curve.p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n raise ValueError(\"Elliptic Curve ElGamal does not support regeneration of ciphertext\")" }, { "identifier": "phe_utils", "path": "lightphe/commons/phe_utils.py", "snippet": "def parse_int(value: Union[int, float], modulo: int) -> int:\ndef fractionize(value: float, modulo: int, precision: Optional[int] = None) -> Tuple[int, int]:\ndef solve_dlp():" }, { "identifier": "Logger", "path": "lightphe/commons/logger.py", "snippet": "class Logger:\n def __init__(self, module):\n self.module = module\n log_level = os.environ.get(\"LIGHTPHE_LOG_LEVEL\", str(logging.INFO))\n try:\n self.log_level = int(log_level)\n except Exception as err:\n self.dump_log(\n f\"Exception while parsing $LIGHTPHE_LOG_LEVEL.\"\n f\"Expected int but it is {log_level} ({str(err)})\"\n )\n self.log_level = logging.INFO\n\n def info(self, message):\n if self.log_level <= logging.INFO:\n self.dump_log(message)\n\n def debug(self, message):\n if self.log_level <= logging.DEBUG:\n self.dump_log(f\"🕷️ {message}\")\n\n def warn(self, message):\n if self.log_level <= logging.WARNING:\n self.dump_log(f\"⚠️ {message}\")\n\n def error(self, message):\n if self.log_level <= logging.ERROR:\n self.dump_log(f\"🔴 {message}\")\n\n def critical(self, message):\n if self.log_level <= logging.CRITICAL:\n self.dump_log(f\"💥 {message}\")\n\n def dump_log(self, message):\n print(f\"{str(datetime.now())[2:-7]} - {message}\")" } ]
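The Goldwasser-Micali snippet in the context list above encrypts one bit per ciphertext element and implements XOR as an element-wise product modulo n. A minimal runnable sketch of that property follows; the toy primes and the helper names (encrypt_bit, decrypt_bit) are illustrative assumptions, not lightphe's API.

import math
import random

# Toy primes with p % 4 == q % 4 == 3, so x = n - 1 is a quadratic
# non-residue mod p and mod q with Jacobi symbol +1, as GM requires
p, q = 499, 547
n = p * q
x = n - 1

def encrypt_bit(m: int) -> int:
    r = random.randrange(2, n)
    while math.gcd(r, n) != 1:
        r = random.randrange(2, n)
    return (pow(r, 2, n) * pow(x, m, n)) % n

def decrypt_bit(c: int) -> int:
    # c is a quadratic residue mod p exactly when the encrypted bit is 0
    return 0 if pow(c % p, (p - 1) // 2, p) == 1 else 1

m1, m2 = 1, 0
c1, c2 = encrypt_bit(m1), encrypt_bit(m2)
assert decrypt_bit(c1) == m1
assert decrypt_bit((c1 * c2) % n) == m1 ^ m2   # ciphertext multiplication realizes XOR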
from typing import Union
from lightphe.models.Homomorphic import Homomorphic
from lightphe.models.Algorithm import Algorithm
from lightphe.cryptosystems.RSA import RSA
from lightphe.cryptosystems.ElGamal import ElGamal
from lightphe.cryptosystems.Paillier import Paillier
from lightphe.cryptosystems.DamgardJurik import DamgardJurik
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama
from lightphe.cryptosystems.Benaloh import Benaloh
from lightphe.cryptosystems.NaccacheStern import NaccacheStern
from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali
from lightphe.commons import phe_utils
from lightphe.commons.logger import Logger
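To make the additive homomorphism of the EllipticCurveElGamal snippet above concrete, here is a self-contained toy over the textbook curve y^2 = x^3 + 2x + 2 over GF(17), whose point (5, 1) has prime order 19. It is a sketch under those assumptions, independent of lightphe's Weierstrass class: ciphertexts add component-wise, and decryption brute-forces the small ECDLP exactly as the snippet's while-loop does.

import random

p, a = 17, 2                       # curve y^2 = x^3 + 2x + 2 over GF(17)
G = (5, 1)                         # generator of prime order 19

def add_points(P, Q):
    if P is None:
        return Q
    if Q is None:
        return P
    if P[0] == Q[0] and (P[1] + Q[1]) % p == 0:
        return None                # P + (-P) = point at infinity
    if P == Q:
        lam = (3 * P[0] * P[0] + a) * pow(2 * P[1], -1, p) % p
    else:
        lam = (Q[1] - P[1]) * pow(Q[0] - P[0], -1, p) % p
    x = (lam * lam - P[0] - Q[0]) % p
    return (x, (lam * (P[0] - x) - P[1]) % p)

def double_and_add(k, P):          # scalar multiplication k * P
    R = None
    while k:
        if k & 1:
            R = add_points(R, P)
        P = add_points(P, P)
        k >>= 1
    return R

ka = 7                             # private key
Qa = double_and_add(ka, G)         # public key

def encrypt(m):
    r = random.randrange(1, 10)    # kept small so nonce sums stay below the order 19
    return double_and_add(r, G), add_points(double_and_add(r, Qa), double_and_add(m, G))

def decrypt(ct):
    c1, c2 = ct
    s = add_points(c2, double_and_add(ka, (c1[0], -c1[1] % p)))  # c2 - ka*c1 = m*G
    k, P = 1, G
    while P != s:                  # brute-force the small ECDLP
        P = add_points(P, G)
        k += 1
    return k

ct1, ct2 = encrypt(3), encrypt(4)
ct_sum = (add_points(ct1[0], ct2[0]), add_points(ct1[1], ct2[1]))
assert decrypt(ct_sum) == 7        # E(3) + E(4) decrypts to 3 + 4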
18005
logger = Logger(module="lightphe/models/Ciphertext.py")

# pylint: disable=too-few-public-methods, no-else-return
class Ciphertext:
    def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]):
        self.algorithm_name = algorithm_name
        self.keys = keys
        self.value = value

        if algorithm_name == Algorithm.RSA:
            cs = RSA(keys=keys)
        elif algorithm_name == Algorithm.ElGamal:
            cs = ElGamal(keys=keys)
        elif algorithm_name == Algorithm.ExponentialElGamal:
            cs = ElGamal(keys=keys, exponential=True)
        elif algorithm_name == Algorithm.EllipticCurveElGamal:
            cs = EllipticCurveElGamal(keys=keys)
        elif algorithm_name == Algorithm.Paillier:
            cs = Paillier(keys=keys)
        elif algorithm_name == Algorithm.DamgardJurik:
            cs = DamgardJurik(keys=keys)
        elif algorithm_name == Algorithm.OkamotoUchiyama:
            cs = OkamotoUchiyama(keys=keys)
        elif algorithm_name == Algorithm.Benaloh:
            cs = Benaloh(keys=keys)
        elif algorithm_name == Algorithm.NaccacheStern:
            cs = NaccacheStern(keys=keys)
        elif algorithm_name == Algorithm.GoldwasserMicali:
            cs = GoldwasserMicali(keys=keys)
        else:
            raise ValueError(f"unimplemented algorithm - {algorithm_name}")

        self.cs: Homomorphic = cs

    def __str__(self) -> str:
        return f"Ciphertext({self.value})"

    def __repr__(self) -> str:
        return f"Ciphertext({self.value})"

    def __add__(self, other: "Ciphertext") -> "Ciphertext":
        """
        Perform homomorphic addition
        Args:
            other (Ciphertext): some other ciphertext
        Returns:
            ciphertext (Ciphertext): homomorphic addition of the two ciphertexts
        """
        if self.cs.keys.get("public_key") is None:
            raise ValueError("You must have public key to perform homomorphic addition")
        result = self.cs.add(ciphertext1=self.value, ciphertext2=other.value)
        return Ciphertext(algorithm_name=self.algorithm_name, keys=self.keys, value=result)

    def __mul__(self, other: Union["Ciphertext", int, float]) -> "Ciphertext":
        """
        Perform homomorphic multiplication or multiply a ciphertext with a known constant
        Args:
            other (int | float | Ciphertext): a known plain constant or some other ciphertext
        Returns:
            homomorphic multiplication of ciphertexts | scalar multiplication of ciphertext
        """
        if self.cs.keys.get("public_key") is None:
            raise ValueError("You must have public key to perform homomorphic multiplication")
        if isinstance(other, Ciphertext):
            # Handle multiplication with another Ciphertext
            result = self.cs.multiply(ciphertext1=self.value, ciphertext2=other.value)
        elif isinstance(other, int):
            result = self.cs.multiply_by_contant(ciphertext=self.value, constant=other)
        elif isinstance(other, float):
logger = Logger(module="lightphe/models/Ciphertext.py")

# pylint: disable=too-few-public-methods, no-else-return
class Ciphertext:
    def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]):
        self.algorithm_name = algorithm_name
        self.keys = keys
        self.value = value

        if algorithm_name == Algorithm.RSA:
            cs = RSA(keys=keys)
        elif algorithm_name == Algorithm.ElGamal:
            cs = ElGamal(keys=keys)
        elif algorithm_name == Algorithm.ExponentialElGamal:
            cs = ElGamal(keys=keys, exponential=True)
        elif algorithm_name == Algorithm.EllipticCurveElGamal:
            cs = EllipticCurveElGamal(keys=keys)
        elif algorithm_name == Algorithm.Paillier:
            cs = Paillier(keys=keys)
        elif algorithm_name == Algorithm.DamgardJurik:
            cs = DamgardJurik(keys=keys)
        elif algorithm_name == Algorithm.OkamotoUchiyama:
            cs = OkamotoUchiyama(keys=keys)
        elif algorithm_name == Algorithm.Benaloh:
            cs = Benaloh(keys=keys)
        elif algorithm_name == Algorithm.NaccacheStern:
            cs = NaccacheStern(keys=keys)
        elif algorithm_name == Algorithm.GoldwasserMicali:
            cs = GoldwasserMicali(keys=keys)
        else:
            raise ValueError(f"unimplemented algorithm - {algorithm_name}")

        self.cs: Homomorphic = cs

    def __str__(self) -> str:
        return f"Ciphertext({self.value})"

    def __repr__(self) -> str:
        return f"Ciphertext({self.value})"

    def __add__(self, other: "Ciphertext") -> "Ciphertext":
        """
        Perform homomorphic addition
        Args:
            other (Ciphertext): some other ciphertext
        Returns:
            ciphertext (Ciphertext): homomorphic addition of the two ciphertexts
        """
        if self.cs.keys.get("public_key") is None:
            raise ValueError("You must have public key to perform homomorphic addition")
        result = self.cs.add(ciphertext1=self.value, ciphertext2=other.value)
        return Ciphertext(algorithm_name=self.algorithm_name, keys=self.keys, value=result)

    def __mul__(self, other: Union["Ciphertext", int, float]) -> "Ciphertext":
        """
        Perform homomorphic multiplication or multiply a ciphertext with a known constant
        Args:
            other (int | float | Ciphertext): a known plain constant or some other ciphertext
        Returns:
            homomorphic multiplication of ciphertexts | scalar multiplication of ciphertext
        """
        if self.cs.keys.get("public_key") is None:
            raise ValueError("You must have public key to perform homomorphic multiplication")
        if isinstance(other, Ciphertext):
            # Handle multiplication with another Ciphertext
            result = self.cs.multiply(ciphertext1=self.value, ciphertext2=other.value)
        elif isinstance(other, int):
            result = self.cs.multiply_by_contant(ciphertext=self.value, constant=other)
        elif isinstance(other, float):
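The Ciphertext class above is essentially a dispatch layer: __add__ and __mul__ forward to the selected cryptosystem's add, multiply, or multiply_by_contant. A minimal sketch of the same operator-overloading pattern over a toy Paillier backend follows; the names (enc, dec, Ct) and the small primes are illustrative assumptions, not lightphe's classes.

import math
import random

# Toy Paillier keypair (g = n + 1 variant); primes are illustratively small
p, q = 293, 433
n = p * q
n2 = n * n
lam = math.lcm(p - 1, q - 1)
mu = pow((pow(n + 1, lam, n2) - 1) // n, -1, n)   # inverse of L(g^lam mod n^2)

def enc(m: int) -> int:
    r = random.randrange(2, n)
    while math.gcd(r, n) != 1:
        r = random.randrange(2, n)
    return pow(n + 1, m, n2) * pow(r, n, n2) % n2

def dec(c: int) -> int:
    return (pow(c, lam, n2) - 1) // n * mu % n

class Ct:
    """Operator overloading dispatches to the homomorphic operations."""
    def __init__(self, v: int):
        self.v = v

    def __add__(self, other: "Ct") -> "Ct":
        return Ct(self.v * other.v % n2)          # E(m1) * E(m2) = E(m1 + m2)

    def __mul__(self, k: int) -> "Ct":
        return Ct(pow(self.v, k, n2))             # E(m) ** k = E(k * m)

a, b = Ct(enc(20)), Ct(enc(22))
assert dec((a + b).v) == 42
assert dec((a * 3).v) == 60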
constant = phe_utils.parse_int(value=other, modulo=self.cs.plaintext_modulo)
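The next_line above routes float constants through phe_utils.parse_int. The underlying idea, encoding a rational a/b as a * b^{-1} mod n so that modular arithmetic mimics fractions, can be sketched as follows; this is a hedged approximation of what parse_int/fractionize do, not lightphe's exact logic, and to_modular is a hypothetical helper name.

from fractions import Fraction

def to_modular(value: float, modulo: int) -> int:
    frac = Fraction(value).limit_denominator(10**6)
    # a / b mod n == a * inverse(b) mod n, valid while gcd(b, n) == 1
    return frac.numerator * pow(frac.denominator, -1, modulo) % modulo

n = 126869                  # toy odd modulus
k = to_modular(2.5, n)      # 2.5 == 5/2, so k = 5 * inverse(2) mod n
assert 2 * k % n == 5       # k behaves like five halves in Z_n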
11
2023-10-28 14:57:59+00:00
24k
chenran-li/RQL-release
stable_baselines3/sac_residual/sac_residual.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: Union[th.device, str] = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n # Adjust buffer size\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n # there is a bug if both optimize_memory_usage and handle_timeout_termination are true\n # see https://github.com/DLR-RM/stable-baselines3/issues/934\n if optimize_memory_usage and handle_timeout_termination:\n raise ValueError(\n \"ReplayBuffer does not support optimize_memory_usage = True \"\n \"and handle_timeout_termination = True simultaneously.\"\n )\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)\n\n if optimize_memory_usage:\n # `observations` contains also the next observation\n self.next_observations = None\n else:\n self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)\n\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)\n\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n\n if self.next_observations is not None:\n total_memory_usage += self.next_observations.nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: List[Dict[str, Any]],\n 
) -> None:\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs,) + self.obs_shape)\n next_obs = next_obs.reshape((self.n_envs,) + self.obs_shape)\n\n # Same, for actions\n action = action.reshape((self.n_envs, self.action_dim))\n\n # Copy to avoid modification by reference\n self.observations[self.pos] = np.array(obs).copy()\n\n if self.optimize_memory_usage:\n self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()\n else:\n self.next_observations[self.pos] = np.array(next_obs).copy()\n\n self.actions[self.pos] = np.array(action).copy()\n self.rewards[self.pos] = np.array(reward).copy()\n self.dones[self.pos] = np.array(done).copy()\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n Custom sampling when using memory efficient variant,\n as we should not sample the element with index `self.pos`\n See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n if not self.optimize_memory_usage:\n return super().sample(batch_size=batch_size, env=env)\n # Do not sample the element with index `self.pos` as the transitions is invalid\n # (we use only one array to store `obs` and `next_obs`)\n if self.full:\n batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size\n else:\n batch_inds = np.random.randint(0, self.pos, size=batch_size)\n return self._get_samples(batch_inds, env=env)\n\n def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n if self.optimize_memory_usage:\n next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :], env)\n else:\n next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :], env)\n\n data = (\n self._normalize_obs(self.observations[batch_inds, env_indices, :], env),\n self.actions[batch_inds, env_indices, :],\n next_obs,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),\n self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env),\n )\n return ReplayBufferSamples(*tuple(map(self.to_torch, data)))" }, { "identifier": "ActionNoise", "path": "stable_baselines3/common/noise.py", "snippet": "class ActionNoise(ABC):\n \"\"\"\n The action noise base class\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def reset(self) -> None:\n \"\"\"\n call end of episode reset for the noise\n \"\"\"\n pass\n\n @abstractmethod\n def __call__(self) -> np.ndarray:\n raise NotImplementedError()" }, { "identifier": "OffPolicyAlgorithm", "path": "stable_baselines3/common/off_policy_algorithm.py", "snippet": "class 
OffPolicyAlgorithm(BaseAlgorithm):\n \"\"\"\n The base for Off-Policy algorithms (ex: SAC/TD3)\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (if registered in Gym, can be str. Can be None for loading trained models)\n :param learning_rate: learning rate for the optimizer,\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. Cf common.noise for the different action noise type.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param policy_kwargs: Additional arguments to be passed to the policy on creation\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param device: Device on which the code should run.\n By default, it will try to use a Cuda compatible device and fallback to cpu\n if it is not possible.\n :param support_multi_env: Whether the algorithm supports training\n with multiple environments (as in A2C)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param seed: Seed for the pseudo random generators\n :param use_sde: Whether to use State Dependent Exploration (SDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param sde_support: Whether the model support gSDE or not\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[BasePolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule],\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = (1, \"step\"),\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n replay_buffer_class: Optional[Type[ReplayBuffer]] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n policy_kwargs: 
Optional[Dict[str, Any]] = None,\n tensorboard_log: Optional[str] = None,\n verbose: int = 0,\n device: Union[th.device, str] = \"auto\",\n support_multi_env: bool = False,\n monitor_wrapper: bool = True,\n seed: Optional[int] = None,\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n sde_support: bool = True,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n\n super().__init__(\n policy=policy,\n env=env,\n learning_rate=learning_rate,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n support_multi_env=support_multi_env,\n monitor_wrapper=monitor_wrapper,\n seed=seed,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n supported_action_spaces=supported_action_spaces,\n )\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.learning_starts = learning_starts\n self.tau = tau\n self.gamma = gamma\n self.gradient_steps = gradient_steps\n self.action_noise = action_noise\n self.optimize_memory_usage = optimize_memory_usage\n self.replay_buffer_class = replay_buffer_class\n if replay_buffer_kwargs is None:\n replay_buffer_kwargs = {}\n self.replay_buffer_kwargs = replay_buffer_kwargs\n self._episode_storage = None\n\n # Save train freq parameter, will be converted later to TrainFreq object\n self.train_freq = train_freq\n\n self.actor = None # type: Optional[th.nn.Module]\n self.replay_buffer = None # type: Optional[ReplayBuffer]\n # Update policy keyword arguments\n if sde_support:\n self.policy_kwargs[\"use_sde\"] = self.use_sde\n # For gSDE only\n self.use_sde_at_warmup = use_sde_at_warmup\n\n def _convert_train_freq(self) -> None:\n \"\"\"\n Convert `train_freq` parameter (int or tuple)\n to a TrainFreq object.\n \"\"\"\n if not isinstance(self.train_freq, TrainFreq):\n train_freq = self.train_freq\n\n # The value of the train frequency will be checked later\n if not isinstance(train_freq, tuple):\n train_freq = (train_freq, \"step\")\n\n try:\n train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1]))\n except ValueError as e:\n raise ValueError(\n f\"The unit of the `train_freq` must be either 'step' or 'episode' not '{train_freq[1]}'!\"\n ) from e\n\n if not isinstance(train_freq[0], int):\n raise ValueError(f\"The frequency of `train_freq` must be an integer and not {train_freq[0]}\")\n\n self.train_freq = TrainFreq(*train_freq)\n\n def _setup_model(self) -> None:\n self._setup_lr_schedule()\n self.set_random_seed(self.seed)\n\n # Use DictReplayBuffer if needed\n if self.replay_buffer_class is None:\n if isinstance(self.observation_space, spaces.Dict):\n self.replay_buffer_class = DictReplayBuffer\n else:\n self.replay_buffer_class = ReplayBuffer\n\n elif self.replay_buffer_class == HerReplayBuffer:\n assert self.env is not None, \"You must pass an environment when using `HerReplayBuffer`\"\n\n # If using offline sampling, we need a classic replay buffer too\n if self.replay_buffer_kwargs.get(\"online_sampling\", True):\n replay_buffer = None\n else:\n replay_buffer = DictReplayBuffer(\n self.buffer_size,\n self.observation_space,\n self.action_space,\n device=self.device,\n optimize_memory_usage=self.optimize_memory_usage,\n )\n\n self.replay_buffer = HerReplayBuffer(\n self.env,\n self.buffer_size,\n device=self.device,\n replay_buffer=replay_buffer,\n **self.replay_buffer_kwargs,\n )\n\n if self.replay_buffer is None:\n self.replay_buffer = self.replay_buffer_class(\n self.buffer_size,\n self.observation_space,\n self.action_space,\n 
device=self.device,\n n_envs=self.n_envs,\n optimize_memory_usage=self.optimize_memory_usage,\n **self.replay_buffer_kwargs,\n )\n\n self.policy = self.policy_class( # pytype:disable=not-instantiable\n self.observation_space,\n self.action_space,\n self.lr_schedule,\n **self.policy_kwargs, # pytype:disable=not-instantiable\n )\n self.policy = self.policy.to(self.device)\n\n # Convert train freq parameter to TrainFreq object\n self._convert_train_freq()\n\n def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:\n \"\"\"\n Save the replay buffer as a pickle file.\n\n :param path: Path to the file where the replay buffer should be saved.\n if path is a str or pathlib.Path, the path is automatically created if necessary.\n \"\"\"\n assert self.replay_buffer is not None, \"The replay buffer is not defined\"\n save_to_pkl(path, self.replay_buffer, self.verbose)\n\n def load_replay_buffer(\n self,\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n truncate_last_traj: bool = True,\n ) -> None:\n \"\"\"\n Load a replay buffer from a pickle file.\n\n :param path: Path to the pickled replay buffer.\n :param truncate_last_traj: When using ``HerReplayBuffer`` with online sampling:\n If set to ``True``, we assume that the last trajectory in the replay buffer was finished\n (and truncate it).\n If set to ``False``, we assume that we continue the same trajectory (same episode).\n \"\"\"\n self.replay_buffer = load_from_pkl(path, self.verbose)\n assert isinstance(self.replay_buffer, ReplayBuffer), \"The replay buffer must inherit from ReplayBuffer class\"\n\n # Backward compatibility with SB3 < 2.1.0 replay buffer\n # Keep old behavior: do not handle timeout termination separately\n if not hasattr(self.replay_buffer, \"handle_timeout_termination\"): # pragma: no cover\n self.replay_buffer.handle_timeout_termination = False\n self.replay_buffer.timeouts = np.zeros_like(self.replay_buffer.dones)\n\n if isinstance(self.replay_buffer, HerReplayBuffer):\n assert self.env is not None, \"You must pass an environment at load time when using `HerReplayBuffer`\"\n self.replay_buffer.set_env(self.get_env())\n if truncate_last_traj:\n self.replay_buffer.truncate_last_trajectory()\n\n def _setup_learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n reset_num_timesteps: bool = True,\n tb_log_name: str = \"run\",\n progress_bar: bool = False,\n ) -> Tuple[int, BaseCallback]:\n \"\"\"\n cf `BaseAlgorithm`.\n \"\"\"\n # Prevent continuity issue by truncating trajectory\n # when using memory efficient replay buffer\n # see https://github.com/DLR-RM/stable-baselines3/issues/46\n\n # Special case when using HerReplayBuffer,\n # the classic replay buffer is inside it when using offline sampling\n if isinstance(self.replay_buffer, HerReplayBuffer):\n replay_buffer = self.replay_buffer.replay_buffer\n else:\n replay_buffer = self.replay_buffer\n\n truncate_last_traj = (\n self.optimize_memory_usage\n and reset_num_timesteps\n and replay_buffer is not None\n and (replay_buffer.full or replay_buffer.pos > 0)\n )\n\n if truncate_last_traj:\n warnings.warn(\n \"The last trajectory in the replay buffer will be truncated, \"\n \"see https://github.com/DLR-RM/stable-baselines3/issues/46.\"\n \"You should use `reset_num_timesteps=False` or `optimize_memory_usage=False`\"\n \"to avoid that issue.\"\n )\n # Go to the previous index\n pos = (replay_buffer.pos - 1) % replay_buffer.buffer_size\n replay_buffer.dones[pos] = True\n\n return super()._setup_learn(\n total_timesteps,\n 
callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n def learn(\n self: SelfOffPolicyAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n tb_log_name: str = \"run\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfOffPolicyAlgorithm:\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n callback.on_training_start(locals(), globals())\n\n while self.num_timesteps < total_timesteps:\n rollout = self.collect_rollouts(\n self.env,\n train_freq=self.train_freq,\n action_noise=self.action_noise,\n callback=callback,\n learning_starts=self.learning_starts,\n replay_buffer=self.replay_buffer,\n log_interval=log_interval,\n )\n\n if rollout.continue_training is False:\n break\n\n if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:\n # If no `gradient_steps` is specified,\n # do as many gradients steps as steps performed during the rollout\n gradient_steps = self.gradient_steps if self.gradient_steps >= 0 else rollout.episode_timesteps\n # Special case when the user passes `gradient_steps=0`\n if gradient_steps > 0:\n self.train(batch_size=self.batch_size, gradient_steps=gradient_steps)\n\n callback.on_training_end()\n\n return self\n\n def train(self, gradient_steps: int, batch_size: int) -> None:\n \"\"\"\n Sample the replay buffer and do the updates\n (gradient descent and update target networks)\n \"\"\"\n raise NotImplementedError()\n\n def _sample_action(\n self,\n learning_starts: int,\n action_noise: Optional[ActionNoise] = None,\n n_envs: int = 1,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Sample an action according to the exploration policy.\n This is either done by sampling the probability distribution of the policy,\n or sampling a random action (from a uniform distribution over the action space)\n or by adding noise to the deterministic output.\n\n :param action_noise: Action noise that will be used for exploration\n Required for deterministic policy (e.g. TD3). 
This can also be used\n in addition to the stochastic policy for SAC.\n :param learning_starts: Number of steps before learning for the warm-up phase.\n :param n_envs:\n :return: action to take in the environment\n and scaled action that will be stored in the replay buffer.\n The two differs when the action space is not normalized (bounds are not [-1, 1]).\n \"\"\"\n # Select action randomly or according to policy\n if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):\n # Warmup phase\n unscaled_action = np.array([self.action_space.sample() for _ in range(n_envs)])\n else:\n # Note: when using continuous actions,\n # we assume that the policy uses tanh to scale the action\n # We use non-deterministic action in the case of SAC, for TD3, it does not matter\n unscaled_action, _ = self.predict(self._last_obs, deterministic=False)\n\n # Rescale the action from [low, high] to [-1, 1]\n if isinstance(self.action_space, spaces.Box):\n scaled_action = self.policy.scale_action(unscaled_action)\n\n # Add noise to the action (improve exploration)\n if action_noise is not None:\n scaled_action = np.clip(scaled_action + action_noise(), -1, 1)\n\n # We store the scaled action in the buffer\n buffer_action = scaled_action\n action = self.policy.unscale_action(scaled_action)\n else:\n # Discrete case, no need to normalize or clip\n buffer_action = unscaled_action\n action = buffer_action\n return action, buffer_action\n\n def _dump_logs(self) -> None:\n \"\"\"\n Write log.\n \"\"\"\n time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)\n fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)\n self.logger.record(\"time/episodes\", self._episode_num, exclude=\"tensorboard\")\n if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:\n self.logger.record(\"rollout/ep_rew_mean\", safe_mean([ep_info[\"r\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"rollout/ep_len_mean\", safe_mean([ep_info[\"l\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"time/fps\", fps)\n self.logger.record(\"time/time_elapsed\", int(time_elapsed), exclude=\"tensorboard\")\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n if self.use_sde:\n self.logger.record(\"train/std\", (self.actor.get_std()).mean().item())\n\n if len(self.ep_success_buffer) > 0:\n self.logger.record(\"rollout/success_rate\", safe_mean(self.ep_success_buffer))\n # Pass the number of timesteps for tensorboard\n self.logger.dump(step=self.num_timesteps)\n\n def _on_step(self) -> None:\n \"\"\"\n Method called after each step in the environment.\n It is meant to trigger DQN target network update\n but can be used for other purposes\n \"\"\"\n pass\n\n def _store_transition(\n self,\n replay_buffer: ReplayBuffer,\n buffer_action: np.ndarray,\n new_obs: Union[np.ndarray, Dict[str, np.ndarray]],\n reward: np.ndarray,\n dones: np.ndarray,\n infos: List[Dict[str, Any]],\n ) -> None:\n \"\"\"\n Store transition in the replay buffer.\n We store the normalized action and the unnormalized observation.\n It also handles terminal observations (because VecEnv resets automatically).\n\n :param replay_buffer: Replay buffer object where to store the transition.\n :param buffer_action: normalized action\n :param new_obs: next observation in the current episode\n or first observation of the episode (when dones is True)\n :param reward: reward for the current transition\n :param dones: Termination signal\n 
:param infos: List of additional information about the transition.\n It may contain the terminal observations and information about timeout.\n \"\"\"\n # Store only the unnormalized version\n if self._vec_normalize_env is not None:\n new_obs_ = self._vec_normalize_env.get_original_obs()\n reward_ = self._vec_normalize_env.get_original_reward()\n else:\n # Avoid changing the original ones\n self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward\n\n # Avoid modification by reference\n next_obs = deepcopy(new_obs_)\n # As the VecEnv resets automatically, new_obs is already the\n # first observation of the next episode\n for i, done in enumerate(dones):\n if done and infos[i].get(\"terminal_observation\") is not None:\n if isinstance(next_obs, dict):\n next_obs_ = infos[i][\"terminal_observation\"]\n # VecNormalize normalizes the terminal observation\n if self._vec_normalize_env is not None:\n next_obs_ = self._vec_normalize_env.unnormalize_obs(next_obs_)\n # Replace next obs for the correct envs\n for key in next_obs.keys():\n next_obs[key][i] = next_obs_[key]\n else:\n next_obs[i] = infos[i][\"terminal_observation\"]\n # VecNormalize normalizes the terminal observation\n if self._vec_normalize_env is not None:\n next_obs[i] = self._vec_normalize_env.unnormalize_obs(next_obs[i, :])\n\n replay_buffer.add(\n self._last_original_obs,\n next_obs,\n buffer_action,\n reward_,\n dones,\n infos,\n )\n\n self._last_obs = new_obs\n # Save the unnormalized observation\n if self._vec_normalize_env is not None:\n self._last_original_obs = new_obs_\n\n def collect_rollouts(\n self,\n env: VecEnv,\n callback: BaseCallback,\n train_freq: TrainFreq,\n replay_buffer: ReplayBuffer,\n action_noise: Optional[ActionNoise] = None,\n learning_starts: int = 0,\n log_interval: Optional[int] = None,\n ) -> RolloutReturn:\n \"\"\"\n Collect experiences and store them into a ``ReplayBuffer``.\n\n :param env: The training environment\n :param callback: Callback that will be called at each step\n (and at the beginning and end of the rollout)\n :param train_freq: How much experience to collect\n by doing rollouts of current policy.\n Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``\n or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``\n with ``<n>`` being an integer greater than 0.\n :param action_noise: Action noise that will be used for exploration\n Required for deterministic policy (e.g. TD3). 
This can also be used\n in addition to the stochastic policy for SAC.\n :param learning_starts: Number of steps before learning for the warm-up phase.\n :param replay_buffer:\n :param log_interval: Log data every ``log_interval`` episodes\n :return:\n \"\"\"\n # Switch to eval mode (this affects batch norm / dropout)\n self.policy.set_training_mode(False)\n\n num_collected_steps, num_collected_episodes = 0, 0\n\n assert isinstance(env, VecEnv), \"You must pass a VecEnv\"\n assert train_freq.frequency > 0, \"Should at least collect one step or episode.\"\n\n if env.num_envs > 1:\n assert train_freq.unit == TrainFrequencyUnit.STEP, \"You must use only one env when doing episodic training.\"\n\n # Vectorize action noise if needed\n if action_noise is not None and env.num_envs > 1 and not isinstance(action_noise, VectorizedActionNoise):\n action_noise = VectorizedActionNoise(action_noise, env.num_envs)\n\n if self.use_sde:\n self.actor.reset_noise(env.num_envs)\n\n callback.on_rollout_start()\n continue_training = True\n\n while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes):\n if self.use_sde and self.sde_sample_freq > 0 and num_collected_steps % self.sde_sample_freq == 0:\n # Sample a new noise matrix\n self.actor.reset_noise(env.num_envs)\n\n # Select action randomly or according to policy\n actions, buffer_actions = self._sample_action(learning_starts, action_noise, env.num_envs)\n\n # Rescale and perform action\n new_obs, rewards, dones, infos = env.step(actions)\n\n self.num_timesteps += env.num_envs\n num_collected_steps += 1\n\n # Give access to local variables\n callback.update_locals(locals())\n # Only stop training if return value is False, not when it is None.\n if callback.on_step() is False:\n return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training=False)\n\n # Retrieve reward and episode length if using Monitor wrapper\n self._update_info_buffer(infos, dones)\n\n # Store data in replay buffer (normalized action and unnormalized observation)\n self._store_transition(replay_buffer, buffer_actions, new_obs, rewards, dones, infos)\n\n self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps)\n\n # For DQN, check if the target network should be updated\n # and update the exploration schedule\n # For SAC/TD3, the update is dones as the same time as the gradient update\n # see https://github.com/hill-a/stable-baselines/issues/900\n self._on_step()\n\n for idx, done in enumerate(dones):\n if done:\n # Update stats\n num_collected_episodes += 1\n self._episode_num += 1\n\n if action_noise is not None:\n kwargs = dict(indices=[idx]) if env.num_envs > 1 else {}\n action_noise.reset(**kwargs)\n\n # Log training infos\n if log_interval is not None and self._episode_num % log_interval == 0:\n self._dump_logs()\n callback.on_rollout_end()\n\n return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training)" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = 
False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "get_parameters_by_name", "path": "stable_baselines3/common/utils.py", "snippet": "def get_parameters_by_name(model: th.nn.Module, included_names: Iterable[str]) -> List[th.Tensor]:\n \"\"\"\n Extract parameters from the state dict of ``model``\n if the name contains one of the strings in ``included_names``.\n\n :param model: the model where the parameters come from.\n :param included_names: substrings of names to include.\n :return: List of parameters values (Pytorch tensors)\n that matches the queried names.\n \"\"\"\n return [param for name, param in model.state_dict().items() if any([key in name for key in included_names])]" }, { "identifier": "polyak_update", "path": "stable_baselines3/common/utils.py", "snippet": "def polyak_update(\n params: Iterable[th.Tensor],\n target_params: Iterable[th.Tensor],\n tau: float,\n) -> None:\n \"\"\"\n Perform a Polyak average update on ``target_params`` using ``params``:\n target parameters are slowly updated towards the main parameters.\n ``tau``, the soft update coefficient controls the interpolation:\n ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``.\n The Polyak update is done in place, with ``no_grad``, and therefore does not create intermediate tensors,\n or a computation graph, reducing memory cost and improving performance. 
We scale the target params\n by ``1-tau`` (in-place), add the new weights, scaled by ``tau`` and store the result of the sum in the target\n params (in place).\n See https://github.com/DLR-RM/stable-baselines3/issues/93\n\n :param params: parameters to use to update the target params\n :param target_params: parameters to update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n \"\"\"\n with th.no_grad():\n # zip does not raise an exception if length of parameters does not match.\n for param, target_param in zip_strict(params, target_params):\n target_param.data.mul_(1 - tau)\n th.add(target_param.data, param.data, alpha=tau, out=target_param.data)" }, { "identifier": "ResidualCnnPolicy", "path": "stable_baselines3/sac_residual/policies.py", "snippet": "LOG_STD_MAX = 2\nLOG_STD_MIN = -20\nclass ResidualSACPolicy(SACPolicy):\nclass ResidualCnnPolicy(ResidualSACPolicy):\nclass ResidualMultiInputPolicy(ResidualSACPolicy):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n use_sde: bool = False,\n log_std_init: float = -3,\n use_expln: bool = False,\n clip_mean: float = 2.0,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n n_critics: int = 2,\n share_features_extractor: bool = False,\n ):" }, { "identifier": "SAC", "path": "stable_baselines3/sac/sac.py", "snippet": "class SAC(OffPolicyAlgorithm):\n \"\"\"\n Soft Actor-Critic (SAC)\n Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,\n This implementation borrows code from original implementation (https://github.com/haarnoja/sac)\n from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo\n (https://github.com/rail-berkeley/softlearning/)\n and from Stable Baselines 
(https://github.com/hill-a/stable-baselines)\n Paper: https://arxiv.org/abs/1801.01290\n Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html\n\n Note: we use double q target and not value target as discussed\n in https://github.com/hill-a/stable-baselines/issues/270\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: learning rate for adam optimizer,\n the same learning rate will be used for all networks (Q-Values, Actor and Value function)\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. Cf common.noise for the different action noise type.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param ent_coef: Entropy regularization coefficient. (Equivalent to\n inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.\n Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)\n :param target_update_interval: update the target network every ``target_network_update_freq``\n gradient steps.\n :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)\n :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n policy_aliases: Dict[str, Type[BasePolicy]] = {\n \"MlpPolicy\": MlpPolicy,\n \"CnnPolicy\": CnnPolicy,\n \"MultiInputPolicy\": MultiInputPolicy,\n }\n\n def __init__(\n self,\n policy: Union[str, Type[SACPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 3e-4,\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = 1,\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n replay_buffer_class: Optional[Type[ReplayBuffer]] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n ent_coef: Union[str, float] = \"auto\",\n target_update_interval: int = 1,\n target_entropy: Union[str, float] = \"auto\",\n use_sde: bool = False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n tensorboard_log: Optional[str] = None,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super().__init__(\n policy,\n env,\n learning_rate,\n buffer_size,\n learning_starts,\n batch_size,\n tau,\n gamma,\n train_freq,\n gradient_steps,\n action_noise,\n replay_buffer_class=replay_buffer_class,\n replay_buffer_kwargs=replay_buffer_kwargs,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n seed=seed,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n use_sde_at_warmup=use_sde_at_warmup,\n optimize_memory_usage=optimize_memory_usage,\n supported_action_spaces=(spaces.Box),\n support_multi_env=True,\n )\n\n self.target_entropy = target_entropy\n self.log_ent_coef = None # type: Optional[th.Tensor]\n # Entropy coefficient / Entropy temperature\n # Inverse of the reward scale\n self.ent_coef = ent_coef\n self.target_update_interval = target_update_interval\n self.ent_coef_optimizer = None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super()._setup_model()\n self._create_aliases()\n # Running mean and running var\n self.batch_norm_stats = get_parameters_by_name(self.critic, [\"running_\"])\n self.batch_norm_stats_target = get_parameters_by_name(self.critic_target, [\"running_\"])\n # Target entropy is used when learning the entropy coefficient\n if self.target_entropy == \"auto\":\n # automatically set target entropy if needed\n self.target_entropy = -np.prod(self.env.action_space.shape).astype(np.float32)\n else:\n # Force conversion\n # this will also throw an error for unexpected string\n self.target_entropy = float(self.target_entropy)\n\n # The entropy coefficient or entropy can be learned automatically\n # see Automating Entropy Adjustment for Maximum Entropy RL section\n # of https://arxiv.org/abs/1812.05905\n if isinstance(self.ent_coef, str) and self.ent_coef.startswith(\"auto\"):\n # Default initial value of ent_coef when learned\n init_value = 1.0\n if \"_\" in self.ent_coef:\n init_value = float(self.ent_coef.split(\"_\")[1])\n assert init_value > 0.0, \"The initial value of ent_coef must be greater than 0\"\n\n # Note: we optimize the log of the entropy coeff which is slightly different from the paper\n # as discussed in 
https://github.com/rail-berkeley/softlearning/issues/37\n self.log_ent_coef = th.log(th.ones(1, device=self.device) * init_value).requires_grad_(True)\n self.ent_coef_optimizer = th.optim.Adam([self.log_ent_coef], lr=self.lr_schedule(1))\n else:\n # Force conversion to float\n # this will throw an error if a malformed string (different from 'auto')\n # is passed\n self.ent_coef_tensor = th.tensor(float(self.ent_coef), device=self.device)\n\n def _create_aliases(self) -> None:\n self.actor = self.policy.actor\n self.critic = self.policy.critic\n self.critic_target = self.policy.critic_target\n\n def train(self, gradient_steps: int, batch_size: int = 64) -> None:\n # Switch to train mode (this affects batch norm / dropout)\n self.policy.set_training_mode(True)\n # Update optimizers learning rate\n optimizers = [self.actor.optimizer, self.critic.optimizer]\n if self.ent_coef_optimizer is not None:\n optimizers += [self.ent_coef_optimizer]\n\n # Update learning rate according to lr schedule\n self._update_learning_rate(optimizers)\n\n ent_coef_losses, ent_coefs = [], []\n actor_losses, critic_losses = [], []\n\n for gradient_step in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)\n\n # We need to sample because `log_std` may have changed between two gradient steps\n if self.use_sde:\n self.actor.reset_noise()\n\n # Action by the current actor for the sampled state\n actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)\n log_prob = log_prob.reshape(-1, 1)\n\n ent_coef_loss = None\n if self.ent_coef_optimizer is not None:\n # Important: detach the variable from the graph\n # so we don't change it with other losses\n # see https://github.com/rail-berkeley/softlearning/issues/60\n ent_coef = th.exp(self.log_ent_coef.detach())\n ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean()\n ent_coef_losses.append(ent_coef_loss.item())\n else:\n ent_coef = self.ent_coef_tensor\n\n ent_coefs.append(ent_coef.item())\n\n # Optimize entropy coefficient, also called\n # entropy temperature or alpha in the paper\n if ent_coef_loss is not None:\n self.ent_coef_optimizer.zero_grad()\n ent_coef_loss.backward()\n self.ent_coef_optimizer.step()\n\n with th.no_grad():\n # Select action according to policy\n next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations)\n # Compute the next Q values: min over all critics targets\n next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)\n next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)\n # add entropy term\n next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)\n # td error + entropy term\n target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values\n\n # Get current Q-values estimates for each critic network\n # using action from the replay buffer\n current_q_values = self.critic(replay_data.observations, replay_data.actions)\n\n # Compute critic loss\n critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values)\n critic_losses.append(critic_loss.item())\n\n # Optimize the critic\n self.critic.optimizer.zero_grad()\n critic_loss.backward()\n self.critic.optimizer.step()\n\n # Compute actor loss\n # Alternative: actor_loss = th.mean(log_prob - qf1_pi)\n # Min over all critic networks\n q_values_pi = th.cat(self.critic(replay_data.observations, actions_pi), dim=1)\n 
min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)\n actor_loss = (ent_coef * log_prob - min_qf_pi).mean()\n actor_losses.append(actor_loss.item())\n\n # Optimize the actor\n self.actor.optimizer.zero_grad()\n actor_loss.backward()\n self.actor.optimizer.step()\n\n # Update target networks\n if gradient_step % self.target_update_interval == 0:\n polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self._n_updates += gradient_steps\n\n self.logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n self.logger.record(\"train/ent_coef\", np.mean(ent_coefs))\n self.logger.record(\"train/actor_loss\", np.mean(actor_losses))\n self.logger.record(\"train/critic_loss\", np.mean(critic_losses))\n if len(ent_coef_losses) > 0:\n self.logger.record(\"train/ent_coef_loss\", np.mean(ent_coef_losses))\n\n def learn(\n self: SelfSAC,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n tb_log_name: str = \"SAC\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfSAC:\n\n return super().learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n tb_log_name=tb_log_name,\n reset_num_timesteps=reset_num_timesteps,\n progress_bar=progress_bar,\n )\n\n def _excluded_save_params(self) -> List[str]:\n return super()._excluded_save_params() + [\"actor\", \"critic\", \"critic_target\"]\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"actor.optimizer\", \"critic.optimizer\"]\n if self.ent_coef_optimizer is not None:\n saved_pytorch_variables = [\"log_ent_coef\"]\n state_dicts.append(\"ent_coef_optimizer\")\n else:\n saved_pytorch_variables = [\"ent_coef_tensor\"]\n return state_dicts, saved_pytorch_variables" } ]
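The context snippet above learns the entropy coefficient by optimizing its log rather than the coefficient itself. A minimal standalone sketch of that update outside of SB3 (the target entropy and the batch of log-probabilities below are made-up stand-ins for illustration):

import torch as th

# Hypothetical numbers: a 4-dim action space and a batch of log pi(a|s) values.
target_entropy = -4.0                      # SAC default: -prod(action_space.shape)
log_prob = th.randn(256, 1) - 4.0          # stand-in for actor.action_log_prob(obs)

# Optimize log(ent_coef) rather than ent_coef, as in the snippet above.
log_ent_coef = th.log(th.ones(1) * 1.0).requires_grad_(True)
optimizer = th.optim.Adam([log_ent_coef], lr=3e-4)

# detach() keeps this loss from back-propagating into the actor.
ent_coef_loss = -(log_ent_coef * (log_prob + target_entropy).detach()).mean()

optimizer.zero_grad()
ent_coef_loss.backward()
optimizer.step()

ent_coef = th.exp(log_ent_coef.detach())   # value used to weight the entropy term
print(float(ent_coef))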
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union

import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F

from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_parameters_by_name, polyak_update
from stable_baselines3.sac.sac import SAC
from stable_baselines3.sac_residual.policies import (
    ResidualCnnPolicy,
    ResidualMlpPolicy,
    ResidualMultiInputPolicy,
    ResidualSACPolicy,
)
17049
target_update_interval=target_update_interval, target_entropy=target_entropy, use_sde=use_sde, sde_sample_freq=sde_sample_freq, use_sde_at_warmup=use_sde_at_warmup, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, seed=seed, device=device, _init_setup_model=_init_setup_model, ) def _setup_model(self) -> None: super()._setup_model() self.policy.prior_model = SAC.load(self.prior_model_path,env=self.env) self.policy.prior_model.policy.set_training_mode(False) # freeze prior model parameters def train(self, gradient_steps: int, batch_size: int = 64) -> None: # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizers learning rate optimizers = [self.actor.optimizer, self.critic.optimizer] if self.ent_coef_optimizer is not None: optimizers += [self.ent_coef_optimizer] # Update learning rate according to lr schedule self._update_learning_rate(optimizers) ent_coef_losses, ent_coefs = [], [] actor_losses, critic_losses = [], [] for gradient_step in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # We need to sample because `log_std` may have changed between two gradient steps if self.use_sde: self.actor.reset_noise() # Action by the current actor for the sampled state actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations) log_prob = log_prob.reshape(-1, 1) # SAC and GAIL prior policy prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.observations, actions_pi) prior_log_prob = prior_log_prob.reshape(-1, 1) ent_coef_loss = None if self.ent_coef_optimizer is not None: # Important: detach the variable from the graph # so we don't change it with other losses # see https://github.com/rail-berkeley/softlearning/issues/60 ent_coef = th.exp(self.log_ent_coef.detach()) ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean() ent_coef_losses.append(ent_coef_loss.item()) else: ent_coef = self.ent_coef_tensor ent_coefs.append(ent_coef.item()) # pre reward normalize to current reward. 
ent_coef_prior = self.policy.prior_model.ent_coef if not isinstance(self.policy.prior_model.ent_coef, str) else ent_coef if ent_coef_prior == 0: ent_coef_prior = ent_coef if self.num_timesteps < self.warmstarting_num_timesteps: ent_coef_prior = self.warmstarting_scale * ent_coef_prior # Optimize entropy coefficient, also called # entropy temperature or alpha in the paper if ent_coef_loss is not None: self.ent_coef_optimizer.zero_grad() ent_coef_loss.backward() self.ent_coef_optimizer.step() with th.no_grad(): # Select action according to policy next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) next_log_prob = next_log_prob.reshape(-1, 1) # SAC and GAIL next_prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.next_observations, next_actions) next_prior_log_prob = next_prior_log_prob.reshape(-1, 1) # Compute the next Q values: min over all critics targets next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1) next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) # add entropy term and prior policy logprob next_q_values = next_q_values + ent_coef_prior * next_prior_log_prob - ent_coef * next_log_prob # td error + entropy term target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates for each critic network # using action from the replay buffer current_q_values = self.critic(replay_data.observations, replay_data.actions) # Compute critic loss critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values) critic_losses.append(critic_loss.item()) # Optimize the critic self.critic.optimizer.zero_grad() critic_loss.backward() self.critic.optimizer.step() # Compute actor loss # Alternative: actor_loss = th.mean(log_prob - qf1_pi) # Min over all critic networks q_values_pi = th.cat(self.critic(replay_data.observations, actions_pi), dim=1) min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) actor_loss = (ent_coef * log_prob - min_qf_pi - ent_coef_prior * prior_log_prob).mean() actor_losses.append(actor_loss.item()) # Optimize the actor self.actor.optimizer.zero_grad() actor_loss.backward() self.actor.optimizer.step() # Update target networks if gradient_step % self.target_update_interval == 0:
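The cropped code above stops inside the training loop. Its key change relative to plain SAC is the extra prior-policy term in the soft Bellman target. A self-contained sketch of just that computation, with random tensors standing in for the target-critic outputs and log-probabilities:

import torch as th

batch_size, gamma = 256, 0.99
ent_coef, ent_coef_prior = th.tensor(0.2), th.tensor(0.2)

rewards = th.randn(batch_size, 1)
dones = th.zeros(batch_size, 1)
# Stand-ins for min over target critics and the two next-action log-probs.
next_q = th.min(th.randn(batch_size, 2), dim=1, keepdim=True).values
next_log_prob = th.randn(batch_size, 1)
next_prior_log_prob = th.randn(batch_size, 1)

# Plain SAC subtracts ent_coef * log pi; the residual variant additionally
# *adds* ent_coef_prior * log pi_prior, pulling the policy toward the frozen prior.
next_q = next_q + ent_coef_prior * next_prior_log_prob - ent_coef * next_log_prob
target_q = rewards + (1 - dones) * gamma * next_q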
SelfResidualSAC = TypeVar("SelfResidualSAC", bound="ResidualSAC") class ResidualSAC(SAC): """ Residual Soft Actor-Critic (SAC) Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor, This implementation borrows code from original implementation (https://github.com/haarnoja/sac) from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo (https://github.com/rail-berkeley/softlearning/) and from Stable Baselines (https://github.com/hill-a/stable-baselines) Paper: https://arxiv.org/abs/1801.01290 Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html Note: we use double q target and not value target as discussed in https://github.com/hill-a/stable-baselines/issues/270 :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``) Set to ``-1`` means to do as many gradient steps as steps done in the environment during the rollout. :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param ent_coef: Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param target_update_interval: update the target network every ``target_network_update_freq`` gradient steps. :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling during the warm up phase (before learning starts) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for debug messages :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. 
Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": ResidualMlpPolicy, "CnnPolicy": ResidualCnnPolicy, "MultiInputPolicy": ResidualMultiInputPolicy, } def __init__( self, policy: Union[str, Type[ResidualSACPolicy]], env: Union[GymEnv, str], prior_model_path: str, learning_rate: Union[float, Schedule] = 3e-4, buffer_size: int = 1_000_000, # 1e6 learning_starts: int = 100, batch_size: int = 256, tau: float = 0.005, gamma: float = 0.99, train_freq: Union[int, Tuple[int, str]] = 1, gradient_steps: int = 1, warmstarting_num_timesteps: int = 0, warmstarting_scale: int = 10, action_noise: Optional[ActionNoise] = None, replay_buffer_class: Optional[Type[ReplayBuffer]] = None, replay_buffer_kwargs: Optional[Dict[str, Any]] = None, optimize_memory_usage: bool = False, ent_coef: Union[str, float] = "auto", target_update_interval: int = 1, target_entropy: Union[str, float] = "auto", use_sde: bool = False, sde_sample_freq: int = -1, use_sde_at_warmup: bool = False, tensorboard_log: Optional[str] = None, policy_kwargs: Optional[Dict[str, Any]] = None, verbose: int = 0, seed: Optional[int] = None, device: Union[th.device, str] = "auto", _init_setup_model: bool = True, ): self.warmstarting_num_timesteps = warmstarting_num_timesteps self.warmstarting_scale = warmstarting_scale self.prior_model_path = prior_model_path super().__init__( policy, env, learning_rate, buffer_size, learning_starts, batch_size, tau, gamma, train_freq, gradient_steps, action_noise, replay_buffer_class=replay_buffer_class, replay_buffer_kwargs=replay_buffer_kwargs, optimize_memory_usage=optimize_memory_usage, ent_coef=ent_coef, target_update_interval=target_update_interval, target_entropy=target_entropy, use_sde=use_sde, sde_sample_freq=sde_sample_freq, use_sde_at_warmup=use_sde_at_warmup, tensorboard_log=tensorboard_log, policy_kwargs=policy_kwargs, verbose=verbose, seed=seed, device=device, _init_setup_model=_init_setup_model, ) def _setup_model(self) -> None: super()._setup_model() self.policy.prior_model = SAC.load(self.prior_model_path,env=self.env) self.policy.prior_model.policy.set_training_mode(False) # freeze prior model parameters def train(self, gradient_steps: int, batch_size: int = 64) -> None: # Switch to train mode (this affects batch norm / dropout) self.policy.set_training_mode(True) # Update optimizers learning rate optimizers = [self.actor.optimizer, self.critic.optimizer] if self.ent_coef_optimizer is not None: optimizers += [self.ent_coef_optimizer] # Update learning rate according to lr schedule self._update_learning_rate(optimizers) ent_coef_losses, ent_coefs = [], [] actor_losses, critic_losses = [], [] for gradient_step in range(gradient_steps): # Sample replay buffer replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env) # We need to sample because `log_std` may have changed between two gradient steps if self.use_sde: self.actor.reset_noise() # Action by the current actor for the sampled state actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations) log_prob = log_prob.reshape(-1, 1) # SAC and GAIL prior policy prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.observations, actions_pi) prior_log_prob = prior_log_prob.reshape(-1, 1) ent_coef_loss = None if self.ent_coef_optimizer is not None: # Important: detach the variable from the graph # so we don't change it 
with other losses # see https://github.com/rail-berkeley/softlearning/issues/60 ent_coef = th.exp(self.log_ent_coef.detach()) ent_coef_loss = -(self.log_ent_coef * (log_prob + self.target_entropy).detach()).mean() ent_coef_losses.append(ent_coef_loss.item()) else: ent_coef = self.ent_coef_tensor ent_coefs.append(ent_coef.item()) # pre reward normalize to current reward. ent_coef_prior = self.policy.prior_model.ent_coef if not isinstance(self.policy.prior_model.ent_coef, str) else ent_coef if ent_coef_prior == 0: ent_coef_prior = ent_coef if self.num_timesteps < self.warmstarting_num_timesteps: ent_coef_prior = self.warmstarting_scale * ent_coef_prior # Optimize entropy coefficient, also called # entropy temperature or alpha in the paper if ent_coef_loss is not None: self.ent_coef_optimizer.zero_grad() ent_coef_loss.backward() self.ent_coef_optimizer.step() with th.no_grad(): # Select action according to policy next_actions, next_log_prob = self.actor.action_log_prob(replay_data.next_observations) next_log_prob = next_log_prob.reshape(-1, 1) # SAC and GAIL next_prior_log_prob = self.policy.prior_model.actor.give_action_log_prob(replay_data.next_observations, next_actions) next_prior_log_prob = next_prior_log_prob.reshape(-1, 1) # Compute the next Q values: min over all critics targets next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1) next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True) # add entropy term and prior policy logprob next_q_values = next_q_values + ent_coef_prior * next_prior_log_prob - ent_coef * next_log_prob # td error + entropy term target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values # Get current Q-values estimates for each critic network # using action from the replay buffer current_q_values = self.critic(replay_data.observations, replay_data.actions) # Compute critic loss critic_loss = 0.5 * sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values) critic_losses.append(critic_loss.item()) # Optimize the critic self.critic.optimizer.zero_grad() critic_loss.backward() self.critic.optimizer.step() # Compute actor loss # Alternative: actor_loss = th.mean(log_prob - qf1_pi) # Min over all critic networks q_values_pi = th.cat(self.critic(replay_data.observations, actions_pi), dim=1) min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True) actor_loss = (ent_coef * log_prob - min_qf_pi - ent_coef_prior * prior_log_prob).mean() actor_losses.append(actor_loss.item()) # Optimize the actor self.actor.optimizer.zero_grad() actor_loss.backward() self.actor.optimizer.step() # Update target networks if gradient_step % self.target_update_interval == 0:
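In `_setup_model`, the residual algorithm loads a pretrained SAC checkpoint and puts it in eval mode. A sketch of the same pattern in isolation; the environment id and checkpoint path `prior.zip` are placeholders, not values from this record:

import gym
from stable_baselines3 import SAC

env = gym.make("Pendulum-v1")
prior = SAC.load("prior.zip", env=env)   # hypothetical checkpoint path
prior.policy.set_training_mode(False)    # eval mode: fixes batch norm / dropout

# set_training_mode only toggles train/eval; to also stop gradients from
# flowing into the prior during the residual update, disable them explicitly:
for p in prior.policy.parameters():
    p.requires_grad_(False)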
polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
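The gold `next_line` is SB3's soft target update, theta_target <- tau * theta + (1 - tau) * theta_target. A minimal re-implementation sketch (not SB3's actual code) to make the semantics of that call concrete:

from typing import Iterable

import torch as th

def soft_update(params: Iterable[th.Tensor],
                target_params: Iterable[th.Tensor],
                tau: float) -> None:
    """In-place: theta_target <- tau * theta + (1 - tau) * theta_target."""
    with th.no_grad():
        for param, target in zip(params, target_params):
            target.mul_(1.0 - tau).add_(param, alpha=tau)

# Calling this with tau=1.0 copies parameters exactly, which is how the train
# loop above mirrors batch-norm running statistics into the target critic.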
6
2023-10-28 01:09:21+00:00
24k
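Each row of this dataset pairs a truncated file (`cropped_code`) with the ground-truth continuation (`next_line`). A hedged sketch of how such a record might be consumed for next-line evaluation, assuming a JSONL export (`records.jsonl` is a hypothetical filename; the field names follow the schema shown above):

import json

def evaluate_record(record: dict, complete_fn) -> bool:
    """Ask a completion model for the next line and compare to the gold one.

    `complete_fn` is any callable mapping a code prefix to one predicted line.
    """
    prediction = complete_fn(record["cropped_code"])
    return prediction.strip() == record["next_line"].strip()

with open("records.jsonl") as f:          # hypothetical export of this dataset
    records = [json.loads(line) for line in f]

accuracy = sum(
    evaluate_record(r, lambda prefix: prefix.splitlines()[-1])  # dummy "model"
    for r in records
) / len(records)
print(f"exact-match next-line accuracy: {accuracy:.2%}")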
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _MLP(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'mlp'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n \"\"\"MLP config preprocessing\"\"\"\n # process mlp configs\n self.saved_model_config = model_config.copy()\n d_layers = []\n n_layers, first_dim, mid_dim, last_dim = \\\n (\n model_config.pop('n_layers'), model_config.pop('first_dim'),\n model_config.pop('mid_dim'), model_config.pop('last_dim')\n )\n for i in range(n_layers):\n if i == 0:\n d_layers.append(first_dim)\n elif i == n_layers - 1 and n_layers > 1:\n d_layers.append(last_dim)\n else:\n d_layers.append(mid_dim)\n model_config['d_layers'] = d_layers\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "FTTransformer", "path": "models/ft_transformer.py", "snippet": "class FTTransformer(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = rtdl.FTTransformer.make_baseline(\n n_num_features=n_num_features,\n cat_cardinalities=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'ft-transformer'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n self.saved_model_config = model_config.copy()\n # process ftt configs\n if 'ffn_d_factor' in model_config:\n model_config['ffn_d_hidden'] = \\\n int(model_config['d_token'] * model_config.pop('ffn_d_factor'))\n return model_config\n \n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "AutoInt", "path": "models/autoint.py", "snippet": "class AutoInt(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _AutoInt(\n d_numerical=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'autoint'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args,\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "DCNv2", "path": "models/dcnv2.py", "snippet": "class DCNv2(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _DCNv2(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'dcnv2'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "NODE", "path": "models/node_model.py", "snippet": "class NODE(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _NODE(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n tree_dim=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'node'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "TabModel", "path": "models/abstract.py", "snippet": "class TabModel(ABC):\n def __init__(self):\n self.model: Optional[nn.Module] = None # true model\n self.base_name = None # model type name\n self.device = None\n self.saved_model_config = None\n self.training_config = None\n self.meta_config = None\n self.post_init()\n\n def post_init(self):\n self.history = {\n 'train': {'loss': [], 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0}, \n 'val': {\n 'metric_name': None, 'metric': [], 'best_metric': None, \n 'log_loss': [], 'best_log_loss': None,\n 'best_epoch': None, 'best_step': None,\n 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0\n }, \n # 'test': {'loss': [], 'metric': [], 'final_metric': None},\n 'device': torch.cuda.get_device_name(),\n } # save metrics\n self.no_improvement = 0 # for dnn early stop\n \n def preproc_config(self, model_config: dict):\n \"\"\"default preprocessing for model configurations\"\"\"\n self.saved_model_config = model_config\n return model_config\n \n @abstractmethod\n def fit(\n self,\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n eval_set: Optional[Tuple[Union[torch.Tensor, np.ndarray]]],\n patience: int,\n task: str,\n training_args: dict,\n meta_args: Optional[dict],\n ):\n \"\"\"\n Training Model with Early Stop(optional)\n load best weights at the end\n \"\"\"\n pass\n \n def dnn_fit(\n self,\n *,\n dnn_fit_func: Optional[DNN_FIT_API] = None,\n # API for specical sampler like curriculum learning\n train_loader: Optional[Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal dataloader sampler if is None\n X_num: 
Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None,\n y_std: Optional[float] = None, # for RMSE\n eval_set: Tuple[torch.Tensor, np.ndarray] = None, # similar API as sk-learn\n patience: int = 0, # <= 0 without early stop\n task: str,\n training_args: dict,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_fit)\n if dnn_fit_func is None:\n dnn_fit_func = default_dnn_fit\n # meta args\n if meta_args is None:\n meta_args = {}\n meta_args.setdefault('save_path', f'results/{self.base_name}')\n if not os.path.exists(meta_args['save_path']):\n print('create new results dir: ', meta_args['save_path'])\n os.makedirs(meta_args['save_path'])\n self.meta_config = meta_args\n # optimzier and scheduler\n training_args.setdefault('optimizer', 'adamw')\n optimizer, scheduler = TabModel.make_optimizer(self.model, training_args)\n # data loader\n training_args.setdefault('batch_size', 64)\n training_args.setdefault('ghost_batch_size', None)\n if train_loader is not None:\n train_loader, missing_idx = train_loader\n training_args['batch_size'] = train_loader.batch_size\n else:\n train_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=training_args['batch_size'],\n shuffle=True,\n )\n if eval_set is not None:\n eval_set = eval_set[0] # only use the first dev set\n dev_loader = TabModel.prepare_tensor_loader(\n X_num=eval_set[0], X_cat=eval_set[1], ys=eval_set[2],\n batch_size=training_args['batch_size'],\n )\n else:\n dev_loader = None\n # training loops\n training_args.setdefault('max_epochs', 1000)\n # training_args.setdefault('report_frequency', 100) # same as save_freq\n # training_args.setdefault('save_frequency', 100) # save per 100 steps\n training_args.setdefault('patience', patience)\n training_args.setdefault('save_frequency', 'epoch') # save per epoch\n self.training_config = training_args\n\n steps_per_backward = 1 if training_args['ghost_batch_size'] is None \\\n else training_args['batch_size'] // training_args['ghost_batch_size']\n steps_per_epoch = len(train_loader)\n tot_step, tot_time = 0, 0\n for e in range(training_args['max_epochs']):\n self.model.train()\n tot_loss = 0\n for step, batch in enumerate(train_loader):\n optimizer.zero_grad()\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n logits, forward_time = dnn_fit_func(self.model, x_num, x_cat, y)\n loss = TabModel.compute_loss(logits, y, task)\n # backward\n start_time = time.time()\n loss.backward()\n backward_time = time.time() - start_time\n self.gradient_policy()\n tot_time += forward_time + backward_time\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n # print or save infos\n tot_step += 1\n tot_loss += loss.cpu().item()\n if isinstance(training_args['save_frequency'], int) \\\n and tot_step % training_args['save_frequency'] == 0:\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n if training_args['save_frequency'] == 'epoch':\n if hasattr(self.model, 'layer_masks'):\n print('layer_mask: ', self.model.layer_masks > 0)\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n 
self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n \n @abstractmethod\n def predict(\n self,\n dev_loader: Optional[DataLoader],\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n task: str,\n return_probs: bool = True,\n return_metric: bool = True,\n return_loss: bool = True,\n meta_args: Optional[dict] = None,\n ):\n \"\"\"\n Prediction\n \"\"\"\n pass\n \n def dnn_predict(\n self,\n *,\n dnn_predict_func: Optional[DNN_PREDICT_API] = None,\n dev_loader: Optional[Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None, \n y_std: Optional[float] = None, # for RMSE\n task: str,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_predict)\n if dnn_predict_func is None:\n dnn_predict_func = default_dnn_predict\n if dev_loader is None:\n dev_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=128,\n )\n else:\n dev_loader, missing_idx = dev_loader\n # print(\"Evaluate...\")\n predictions, golds = [], []\n tot_time = 0\n self.model.eval()\n for batch in dev_loader:\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n with torch.no_grad():\n logits, used_time = dnn_predict_func(self.model, x_num, x_cat)\n tot_time += used_time\n predictions.append(logits)\n golds.append(y)\n self.model.train()\n predictions = torch.cat(predictions).squeeze(-1)\n golds = torch.cat(golds)\n if return_loss:\n loss = TabModel.compute_loss(predictions, golds, task).cpu().item()\n else:\n loss = None\n if return_probs and task != 'regression':\n predictions = (\n predictions.sigmoid()\n if task == 'binclass'\n else predictions.softmax(-1)\n )\n prediction_type = 'probs'\n elif task == 'regression':\n prediction_type = None\n else:\n prediction_type = 'logits'\n predictions = predictions.cpu().numpy()\n golds = golds.cpu().numpy()\n if return_metric:\n metric = TabModel.calculate_metric(\n golds, predictions,\n task, prediction_type, y_std\n )\n logloss = (\n log_loss(golds, np.stack([1-predictions, predictions], axis=1), labels=[0,1])\n if task == 'binclass'\n else log_loss(golds, predictions, labels=list(range(len(set(golds)))))\n if task == 'multiclass'\n else None\n )\n else:\n metric, logloss = None, None\n results = {'loss': loss, 'metric': metric, 'time': tot_time, 'log_loss': logloss}\n if meta_args is not None:\n self.save_prediction(meta_args['save_path'], results)\n return predictions, results\n \n def gradient_policy(self):\n \"\"\"For post porcess model gradient\"\"\"\n pass\n \n @abstractmethod\n def save(self, output_dir):\n \"\"\"\n Save model weights and configs,\n the following default save functions\n can be combined to override this function\n \"\"\"\n pass\n\n def save_pt_model(self, output_dir):\n print('saving pt model weights...')\n # save model params\n torch.save(self.model.state_dict(), Path(output_dir) / 'final.bin')\n \n def save_tree_model(self, output_dir):\n print('saving tree model...')\n pass\n\n def save_history(self, output_dir):\n # save metrics\n with open(Path(output_dir) / 'results.json', 'w') as f:\n json.dump(self.history, f, indent=4)\n \n def 
save_prediction(self, output_dir, results, file='prediction'):\n check_dir(output_dir)\n # save test results\n print(\"saving prediction results\")\n saved_results = {\n 'loss': results['loss'], \n 'metric_name': results['metric'][1], \n 'metric': results['metric'][0], \n 'time': results['time'],\n 'log_loss': results['log_loss'],\n }\n with open(Path(output_dir) / f'{file}.json', 'w') as f:\n json.dump(saved_results, f, indent=4)\n \n def save_config(self, output_dir):\n def serialize(config: dict):\n for key in config:\n # serialized object to store yaml or json files \n if any(isinstance(config[key], obj) for obj in [Path, ]):\n config[key] = str(config[key])\n return config\n # save all configs\n with open(Path(output_dir) / 'configs.yaml', 'w') as f:\n configs = {\n 'model': self.saved_model_config, \n 'training': self.training_config,\n 'meta': serialize(self.meta_config)\n }\n yaml.dump(configs, f, indent=2)\n\n @staticmethod\n def make_optimizer(\n model: nn.Module,\n training_args: dict,\n ) -> Tuple[optim.Optimizer, optim.lr_scheduler._LRScheduler]:\n training_args.setdefault('optimizer', 'adamw')\n training_args.setdefault('no_wd_group', None)\n training_args.setdefault('scheduler', None)\n # optimizer\n if training_args['no_wd_group'] is not None:\n assert isinstance(training_args['no_wd_group'], list)\n def needs_wd(name):\n return all(x not in name for x in training_args['no_wd_group'])\n parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]\n parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]\n model_params = [\n {'params': parameters_with_wd},\n {'params': parameters_without_wd, 'weight_decay': 0.0},\n ]\n else:\n model_params = model.parameters()\n optimizer = make_optimizer(\n training_args['optimizer'],\n model_params,\n training_args['lr'],\n training_args['weight_decay'],\n )\n # scheduler\n if training_args['scheduler'] is not None:\n scheduler = None\n else:\n scheduler = None\n\n return optimizer, scheduler\n \n @staticmethod\n def prepare_tensor_loader(\n X_num: Optional[torch.Tensor],\n X_cat: Optional[torch.Tensor],\n ys: torch.Tensor,\n batch_size: int = 64,\n shuffle: bool = False,\n ):\n assert not all(x is None for x in [X_num, X_cat])\n missing_placeholder = 0 if X_num is None else 1 if X_cat is None else -1\n datas = [x for x in [X_num, X_cat, ys] if x is not None]\n tensor_dataset = TensorDataset(*datas)\n tensor_loader = DataLoader(\n tensor_dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n )\n return tensor_loader, missing_placeholder\n \n @staticmethod\n def parse_batch(batch: Tuple[torch.Tensor], missing_idx, device: torch.device):\n if batch[0].device.type != device.type:\n # if batch[0].device != device: # initialize self.device with model.device rather than torch.device()\n # batch = (x.to(device) for x in batch) # generator\n batch = tuple([x.to(device) for x in batch]) # list\n if missing_idx == -1:\n return batch\n else:\n return batch[:missing_idx] + [None,] + batch[missing_idx:]\n \n @staticmethod\n def compute_loss(logits: torch.Tensor, targets: torch.Tensor, task: str, reduction: str = 'mean'):\n loss_fn = {\n 'binclass': F.binary_cross_entropy_with_logits,\n 'multiclass': F.cross_entropy,\n 'regression': F.mse_loss,\n }[task]\n return loss_fn(logits.squeeze(-1), targets, reduction=reduction)\n \n @staticmethod\n def calculate_metric(\n golds,\n predictions,\n task: str,\n prediction_type: Optional[str] = None,\n y_std: Optional[float] = None,\n ):\n \"\"\"Calculate metrics\"\"\"\n 
metric = {\n 'regression': 'rmse', \n 'binclass': 'roc_auc', \n 'multiclass': 'accuracy'\n }[task]\n \n return calculate_metrics(\n golds, predictions,\n task, prediction_type, y_std\n )[metric], metric\n \n def better_result(self, dev_metric, task, is_loss=False):\n if is_loss: # logloss\n best_dev_metric = self.history['val']['best_log_loss']\n if best_dev_metric is None or best_dev_metric > dev_metric:\n self.history['val']['best_log_loss'] = dev_metric\n return True\n else:\n return False\n best_dev_metric = self.history['val']['best_metric']\n if best_dev_metric is None:\n self.history['val']['best_metric'] = dev_metric\n return True\n elif task == 'regression': # rmse\n if best_dev_metric > dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n else:\n if best_dev_metric < dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n \n def early_stop_handler(self, epoch, tot_step, dev_metric, task, patience, save_path):\n if task != 'regression' and self.better_result(dev_metric['log_loss'], task, is_loss=True):\n # record best logloss\n torch.save(self.model.state_dict(), Path(save_path) / 'best-logloss.bin')\n if self.better_result(dev_metric['metric'], task):\n print('<<< Best Dev Result', end='')\n torch.save(self.model.state_dict(), Path(save_path) / 'best.bin')\n self.no_improvement = 0\n self.history['val']['best_epoch'] = epoch\n self.history['val']['best_step'] = tot_step\n else:\n self.no_improvement += 1\n print(f'| [no improvement] {self.no_improvement}', end='')\n if patience <= 0:\n return False\n else:\n return self.no_improvement >= patience\n \n def save_evaluate_dnn(\n self, \n # print and saved infos\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n # evaluate infos\n task, patience, save_path,\n dev_loader, y_std\n ):\n \"\"\"For DNN models\"\"\"\n epoch, step = tot_step // steps_per_epoch, (tot_step - 1) % steps_per_epoch + 1\n avg_loss = tot_loss / step\n self.history['train']['loss'].append(avg_loss)\n self.history['train']['tot_time'] = tot_time\n self.history['train']['avg_step_time'] = tot_time / tot_step\n self.history['train']['avg_epoch_time'] = self.history['train']['avg_step_time'] * steps_per_epoch\n print(f\"[epoch] {epoch} | [step] {step} | [tot_step] {tot_step} | [used time] {tot_time:.4g} | [train_loss] {avg_loss:.4g} \", end='')\n if dev_loader is not None:\n _, results = self.predict(dev_loader=dev_loader, y_std=y_std, task=task, return_metric=True)\n dev_metric, metric_name = results['metric']\n print(f\"| [{metric_name}] {dev_metric:.4g} \", end='')\n if task != 'regression':\n print(f\"| [log-loss] {results['log_loss']:.4g} \", end='')\n self.history['val']['log_loss'].append(results['log_loss'])\n self.history['val']['metric_name'] = metric_name\n self.history['val']['metric'].append(dev_metric)\n self.history['val']['tot_time'] += results['time']\n self.history['val']['avg_step_time'] = self.history['val']['tot_time'] / tot_step\n self.history['val']['avg_epoch_time'] = self.history['val']['avg_step_time'] * steps_per_epoch\n dev_metric = {'metric': dev_metric, 'log_loss': results['log_loss']}\n if self.early_stop_handler(epoch, tot_step, dev_metric, task, patience, save_path):\n print(' <<< Early Stop')\n return True\n print()\n return False\n \n def load_best_dnn(self, save_path, file='best'):\n model_file = Path(save_path) / f\"{file}.bin\"\n if not os.path.exists(model_file):\n print(f'There is no {file} checkpoint, loading the last one...')\n 
model_file = Path(save_path) / 'final.bin'\n else:\n print(f'Loading {file} model...')\n self.model.load_state_dict(torch.load(model_file))\n print('successfully')" }, { "identifier": "check_dir", "path": "models/abstract.py", "snippet": "def check_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)" }, { "identifier": "Dataset", "path": "data/utils.py", "snippet": "class Dataset:\n X_num: Optional[ArrayDict]\n X_cat: Optional[ArrayDict]\n y: ArrayDict\n y_info: Dict[str, Any]\n task_type: TaskType\n n_classes: Optional[int]\n name: Optional[str]\n\n @classmethod\n def from_dir(cls, dir_: Union[Path, str]) -> 'Dataset':\n dir_ = Path(dir_)\n\n def load(item) -> ArrayDict:\n def _load(file: Path):\n return cast(np.ndarray, np.load(file)) if file.exists() else None\n return {\n x: _load(dir_ / f'{item}_{x}.npy')\n for x in ['train', 'val', 'test']\n }\n\n info = load_json(dir_ / 'info.json')\n\n return Dataset(\n load('X_num') if dir_.joinpath('X_num_train.npy').exists() else None,\n load('X_cat') if dir_.joinpath('X_cat_train.npy').exists() else None,\n load('y'),\n {},\n TaskType(info['task_type']),\n info.get('n_classes'),\n info.get('name'),\n )\n\n @property\n def is_binclass(self) -> bool:\n return self.task_type == TaskType.BINCLASS\n\n @property\n def is_multiclass(self) -> bool:\n return self.task_type == TaskType.MULTICLASS\n\n @property\n def is_regression(self) -> bool:\n return self.task_type == TaskType.REGRESSION\n\n @property\n def n_num_features(self) -> int:\n return 0 if self.X_num is None else self.X_num['train'].shape[1]\n\n @property\n def n_cat_features(self) -> int:\n return 0 if self.X_cat is None else self.X_cat['train'].shape[1]\n\n @property\n def n_features(self) -> int:\n return self.n_num_features + self.n_cat_features\n\n def size(self, part: Optional[str]) -> int:\n return sum(map(len, self.y.values())) if part is None else len(self.y[part])\n\n @property\n def nn_output_dim(self) -> int:\n if self.is_multiclass:\n assert self.n_classes is not None\n return self.n_classes\n else:\n return 1\n\n def get_category_sizes(self, part: str) -> List[int]:\n return [] if self.X_cat is None else get_category_sizes(self.X_cat[part])" }, { "identifier": "DataProcessor", "path": "data/processor.py", "snippet": "class DataProcessor:\n \"\"\"Base class to process a single dataset\"\"\"\n def __init__(\n self, \n normalization: Optional[Normalization] = None,\n num_nan_policy: Optional[NumNanPolicy] = None,\n cat_nan_policy: Optional[CatNanPolicy] = None,\n cat_min_frequency: Optional[float] = None,\n cat_encoding: Optional[CatEncoding] = None,\n y_policy: Optional[YPolicy] = 'default',\n seed: int = 42,\n cache_dir: Optional[str] = None,\n ):\n self.transformation = Transformations(\n seed=seed, \n normalization=normalization, \n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n y_policy=y_policy\n )\n self.cache_dir = cache_dir\n \n def apply(self, dataset: Dataset):\n return transform_dataset(dataset, self.transformation, self.cache_dir)\n \n def save(self, file, **kwargs):\n data_config = {\n 'transformation': vars(self.transformation),\n 'cache_dir': str(self.cache_dir),\n 'meta': kwargs,\n }\n with open(file, 'w') as f:\n yaml.dump(data_config, f, indent=2)\n \n @staticmethod\n def check_splits(dataset: Dataset):\n valid_splits = True\n if 'train' in dataset.y:\n if 'test' not in dataset.y:\n warnings.warn(\"Missing test split, unable to run prediction\")\n valid_splits = False\n if 'val' not in dataset.y:\n warnings.warn(\"Missing dev split, unable to early stop; ignore this message if no early stopping is needed.\")\n valid_splits = False\n if valid_splits:\n print(\"ready for training!\")\n else:\n raise ValueError(\"Missing training split in the dataset\")\n \n @staticmethod\n def prepare(dataset: Dataset, model: Optional[TabModel] = None, device: str = 'cuda'):\n assert model is not None or device is not None\n def get_spl(X: Optional[Union[ArrayDict, TensorDict]], spl):\n return None if X is None else X[spl]\n if device is not None or isinstance(model.model, nn.Module):\n device = device or model.model.device\n X_num, X_cat, ys = prepare_tensors(dataset, device)\n return {spl: (\n get_spl(X_num, spl), \n get_spl(X_cat, spl), \n get_spl(ys, spl)\n ) for spl in ys}\n else:\n return {spl: (\n get_spl(dataset.X_num, spl), \n get_spl(dataset.X_cat, spl), \n get_spl(dataset.y, spl)\n ) for spl in dataset.y}\n \n @staticmethod\n def load_preproc_default(\n output_dir, # output preprocessing infos\n model_name, \n dataset_name, \n benchmark_name: Optional[str] = None, \n seed: int = 42, \n cache_dir: Optional[str] = None\n ):\n \"\"\"default data preprocessing pipeline\"\"\"\n global DATASETS, CUSTOM_DATASETS\n if dataset_name in DATASETS or dataset_name in CUSTOM_DATASETS:\n data_src = DATASETS if dataset_name in DATASETS else CUSTOM_DATASETS\n data_config = data_src[dataset_name]\n data_path = Path(data_config['path'])\n data_config.setdefault('normalization', 'quantile')\n normalization = data_config['normalization']\n elif benchmark_name is not None:\n assert benchmark_name in BENCHMARKS, f\"Benchmark '{benchmark_name}' is not included, \\\n please choose one of '{list(BENCHMARKS.keys())}', or include your benchmark manually.\"\n benchmark_info = BENCHMARKS[benchmark_name]\n assert dataset_name in benchmark_info['datasets'], f\"dataset '{dataset_name}' not in benchmark '{benchmark_name}'\"\n data_path = Path(benchmark_info['path']) / dataset_name\n normalization = 'quantile'\n else:\n raise ValueError(f\"No dataset '{dataset_name}' is available, \\\n if you want to use a custom dataset (from a csv file), use `add_custom_dataset`\")\n \n dataset = Dataset.from_dir(data_path)\n # default preprocess settings\n num_nan_policy = 'mean' if dataset.X_num is not None and \\\n any(np.isnan(dataset.X_num[spl]).any() for spl in dataset.X_num) else None\n cat_nan_policy = None\n if model_name in ['xgboost', 'catboost', 'lightgbm']: # for tree models or other sklearn algorithms\n normalization = None\n cat_min_frequency = None\n cat_encoding = 'one-hot'\n if model_name in ['catboost']:\n cat_encoding = None\n else: # for dnns\n # BUG: (dataset.X_cat[spl] == CAT_MISSING_VALUE).any() behaves differently\n # dtype: int -> bool, dtype: string -> array[bool], dtype: object -> np.load error\n # CURRENT: uniformly using string type to store categorical features\n if dataset.X_cat is not None and \\\n any((dataset.X_cat[spl] == CAT_MISSING_VALUE).any() for spl in dataset.X_cat):\n cat_nan_policy = 'most_frequent'\n cat_min_frequency = None\n cat_encoding = None\n cache_dir = cache_dir or data_path\n processor = DataProcessor(\n normalization=normalization,\n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n seed=seed,\n cache_dir=Path(cache_dir),\n )\n dataset = processor.apply(dataset)\n # check train, val, test splits\n DataProcessor.check_splits(dataset)\n # save preprocessing infos\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n processor.save(\n Path(output_dir) / 'data_config.yaml',\n benchmark=str(benchmark_name),\n dataset=dataset_name\n )\n return dataset\n\n @staticmethod\n def split(\n X_num: Optional[np.ndarray] = None, \n X_cat: Optional[np.ndarray] = None, \n ys: np.ndarray = None, \n train_ratio: float = 0.8,\n stratify: bool = True,\n seed: int = 42,\n ):\n assert 0 < train_ratio < 1\n assert ys is not None\n sample_idx = np.arange(len(ys))\n test_ratio = 1 - train_ratio\n _stratify = None if not stratify else ys\n train_idx, test_idx = train_test_split(sample_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n _stratify = None if not stratify else ys[train_idx]\n train_idx, val_idx = train_test_split(train_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n if X_num is not None:\n X_num = {'train': X_num[train_idx], 'val': X_num[val_idx], 'test': X_num[test_idx]}\n if X_cat is not None:\n X_cat = {'train': X_cat[train_idx], 'val': X_cat[val_idx], 'test': X_cat[test_idx]}\n ys = {'train': ys[train_idx], 'val': ys[val_idx], 'test': ys[test_idx]}\n idx = {'train': train_idx, 'val': val_idx, 'test': test_idx}\n return X_num, X_cat, ys, idx\n \n @staticmethod\n def del_custom_dataset(\n dataset_names: Union[str, List[str]]\n ):\n global DATASETS, CUSTOM_DATASETS\n all_infos = read_custom_infos()\n if isinstance(dataset_names, str):\n dataset_names = [dataset_names]\n for dataset_name in dataset_names:\n if dataset_name not in CUSTOM_DATASETS:\n print(f\"custom dataset: {dataset_name} does not exist\")\n continue\n elif dataset_name in DATASETS:\n print(f\"cannot delete an in-built dataset: {dataset_name}\")\n continue\n data_info = CUSTOM_DATASETS[dataset_name]\n task = data_info['task_type']\n data_path = data_info['path']\n data_idx = [info['name'] for info in all_infos['data_list']].index(dataset_name)\n all_infos['data_list'].pop(data_idx)\n all_infos['n_datasets'] -= 1\n all_infos[task] -= 1\n shutil.rmtree(data_path)\n print(f\"deleted dataset: {dataset_name} successfully\")\n write_custom_infos(all_infos)\n from .env import CUSTOM_DATASETS # BUG: refresh the global variable\n\n @staticmethod\n def add_custom_dataset(\n file: Union[str, Path],\n format: DataFileType = 'csv',\n dataset_name: Optional[str] = None,\n task: Optional[str] = None,\n num_cols: Optional[List[int]] = None,\n cat_cols: Optional[List[int]] = None,\n label_index: int = -1, # label column index\n header: Optional[int] = 0, # header row\n max_cat_num: int = 16,\n train_ratio: float = 0.8, # split train / test, train / val\n seed: int = 42, # random split seed\n ):\n \"\"\"\n Support for adding a custom dataset from a single data file\n ---\n read a raw csv file, process into 3 splits (train, val, test), and add to custom_datasets\n\n TODO: add a dataset from prepared data split files \n TODO: support no validation split\n \"\"\"\n global DATASETS, CUSTOM_DATASETS\n file_name = Path(file).name\n assert file_name.endswith(format), f'please check if the file \\\n is in {format} format, or add the suffix manually'\n dataset_name = dataset_name or file_name[:-len(format)-1]\n assert dataset_name not in DATASETS, f'same dataset name as an in-built dataset: {dataset_name}'\n assert dataset_name not in CUSTOM_DATASETS, f\"existing custom dataset '{dataset_name}' found\"\n \n if format == 'csv':\n datas: pd.DataFrame = pd.read_csv(file, header=header)\n columns = datas.columns if header is not None else None\n elif format == 'npy':\n header = None # numpy file has no headers\n columns = None\n datas = np.load(file)\n raise NotImplementedError(\"only csv file loading is supported now\")\n else:\n raise ValueError(\"support for other formats will be added later\")\n \n X_idx = list(range(datas.shape[1]))\n y_idx = X_idx.pop(label_index)\n label_name = columns[y_idx] if columns is not None else None\n # numerical and categorical feature detection\n if num_cols is None or cat_cols is None:\n print('automatically detecting column types...')\n print('max category amount: ', max_cat_num)\n num_cols, cat_cols = [], []\n num_names, cat_names = [], []\n for i in X_idx:\n if datas.iloc[:, i].values.dtype == float:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n else: # int or object (str)\n if len(set(datas.iloc[:, i].values)) <= max_cat_num:\n cat_cols.append(i)\n if columns is not None:\n cat_names.append(columns[i])\n elif datas.iloc[:, i].values.dtype == int:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n if not num_names and not cat_names:\n num_names, cat_names = None, None\n elif columns:\n num_names = [columns[i] for i in num_cols]\n cat_names = [columns[i] for i in cat_cols]\n else:\n num_names, cat_names = None, None\n n_num_features = len(num_cols)\n n_cat_features = len(cat_cols)\n # build X_num and X_cat\n X_num, ys = None, datas.iloc[:, y_idx].values\n if len(num_cols) > 0:\n X_num = datas.iloc[:, num_cols].values.astype(np.float32)\n # check data type\n X_cat = []\n for i in cat_cols:\n if datas.iloc[:, i].values.dtype == int:\n x = datas.iloc[:, i].values.astype(np.int64)\n # ordered by value\n # x = OrdinalEncoder(categories=[sorted(list(set(x)))]).fit_transform(x.reshape(-1, 1))\n else: # string object\n x = datas.iloc[:, i].values.astype(object)\n # most_common = [item[0] for item in Counter(x).most_common()]\n # ordered by frequency\n # x = OrdinalEncoder(categories=[most_common]).fit_transform(x.reshape(-1, 1))\n X_cat.append(x.astype(np.str_)) # Encode later, compatible with Line 140\n X_cat = np.stack(X_cat, axis=1) if len(X_cat) > 0 else None # if using OrdinalEncoder, np.concatenate\n # detect task type\n def process_non_regression_labels(ys: np.ndarray, task):\n if ys.dtype in [int, float]:\n ys = OrdinalEncoder(categories=[sorted(list(set(ys)))]).fit_transform(ys.reshape(-1, 1))\n else:\n most_common = [item[0] for item in Counter(ys).most_common()]\n ys = OrdinalEncoder(categories=[most_common]).fit_transform(ys.reshape(-1, 1))\n ys = ys[:, 0]\n return ys.astype(np.float32) if task == 'binclass' else ys.astype(np.int64)\n \n if task is None:\n if ys.dtype in [int, object]:\n task = 'binclass' if len(set(ys)) == 2 else 'multiclass'\n ys = process_non_regression_labels(ys, task)\n elif ys.dtype == float:\n if len(set(ys)) == 2:\n task = 'binclass'\n ys = process_non_regression_labels(ys, task)\n else:\n task = 'regression'\n ys = ys.astype(np.float32)\n else:\n if task == 'regression':\n ys = ys.astype(np.float32)\n else:\n ys = process_non_regression_labels(ys, task)\n\n # split datasets\n stratify = task != 'regression'\n X_num, X_cat, ys, idx = DataProcessor.split(X_num, X_cat, ys, train_ratio, stratify, seed)\n # push to CUSTOM_DATASETS\n data_info = {\n 'name': dataset_name,\n 'id': f'{dataset_name.lower()}--custom',\n 'task_type': task,\n 'label_name': label_name,\n 'n_num_features': n_num_features,\n 'num_feature_names': num_names,\n 'n_cat_features': n_cat_features,\n 'cat_feature_names': cat_names,\n 'test_size': len(ys['test']),\n 'train_size': len(ys['train']),\n 'val_size': len(ys['val'])}\n push_custom_datasets(X_num, X_cat, ys, idx, data_info)\n from .env import CUSTOM_DATASETS # refresh global variable\n print(f'finished, now you can load your dataset with `load_preproc_default(\"{dataset_name}\")`')" }
]
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
15711
MODEL_CARDS = {
    'xgboost': None, 'catboost': None, 'lightgbm': None,
    'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE,
    'ft-transformer': FTTransformer,
    'saint': None, 't2g-former': None, 'excel-former': None,
}
HPOLib = Literal['optuna', 'hyperopt']  # TODO: add 'hyperopt' support

def get_model_cards():
    return {
        'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])),
        'coming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value]))
    }

def seed_everything(seed=42):
    '''
    Sets the seed of the entire notebook so results are the same every time we run.
    This is for REPRODUCIBILITY.
    '''
    random.seed(seed)
    # Set a fixed value for the hash seed
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # When running on the CuDNN backend, two further options must be set
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

def load_config_from_file(file):
    file = str(file)
    if file.endswith('.yaml'):
        with open(file, 'r') as f:
            cfg = yaml.safe_load(f)
    elif file.endswith('.json'):
        with open(file, 'r') as f:
            cfg = json.load(f)
    else:
        raise AssertionError('Config files only support yaml or json format now.')
    return cfg

def extract_config(model_config: dict, is_large_data: bool = False):
    """selection of different search spaces"""
    used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})}
    for field in ['model', 'training']:
        for k in model_config[field]:
            cfgs = model_config[field][k]
            if 'type2' not in cfgs:
                used_cfg = cfgs
            else:
                if not is_large_data:
                    used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')}
                else:
                    used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')}
            used_cfgs[field][k] = used_cfg
    return used_cfgs

def make_baseline(
    model_name,
    model_config: Union[dict, str],
    n_num: int,
    cat_card: Optional[List[int]],
    n_labels: int,
    sparsity_scheme: Optional[str] = None,
    device: Union[str, torch.device] = 'cuda',
) -> TabModel:
    """Process Model Configs and Call Specific Model APIs"""
    assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of the valid models in {MODEL_CARDS}"
    if isinstance(model_config, str):
        model_config = load_config_from_file(model_config)['model']
    if MODEL_CARDS[model_name] is None:
        raise NotImplementedError("Please add corresponding model implementation to `models` module")
    if sparsity_scheme is not None:
        assert 'mlp' in model_name
        return MODEL_CARDS[model_name](
            model_config=model_config, n_num_features=n_num,
            categories=cat_card, n_labels=n_labels,
            sparsity_scheme=sparsity_scheme)
    return MODEL_CARDS[model_name](
        model_config=model_config, n_num_features=n_num,
        categories=cat_card, n_labels=n_labels)

def tune(
    model_name: str = None,
    search_config: Union[dict, str] = None,
dataset: Dataset = None,
7
2023-10-30 14:55:44+00:00
24k
hyperspy/exspy
exspy/tests/models/test_eelsmodel.py
[ { "identifier": "elements_db", "path": "exspy/misc/elements.py", "snippet": "" }, { "identifier": "_GOSH_URL", "path": "exspy/misc/eels/gosh_gos.py", "snippet": "_GOSH_URL = f\"doi:{_GOSH_DOI}/Segger_Guzzinati_Kohl_1.5.0.gosh\"" }, { "identifier": "_GOSH_KNOWN_HASH", "path": "exspy/misc/eels/gosh_gos.py", "snippet": "_GOSH_KNOWN_HASH = \"md5:7fee8891c147a4f769668403b54c529b\"" }, { "identifier": "EELSSpectrum", "path": "exspy/signals/eels.py", "snippet": "class EELSSpectrum(Signal1D):\n\n \"\"\"Signal class for EELS spectra.\"\"\"\n\n _signal_type = \"EELS\"\n _alias_signal_types = [\"TEM EELS\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Attributes defaults\n self.subshells = set()\n self.elements = set()\n self.edges = list()\n if hasattr(self.metadata, \"Sample\") and hasattr(\n self.metadata.Sample, \"elements\"\n ):\n self.add_elements(self.metadata.Sample.elements)\n self.axes_manager.signal_axes[0].is_binned = True\n self._edge_markers = {\"names\": [], \"lines\": None, \"texts\": None}\n\n def add_elements(self, elements, include_pre_edges=False):\n \"\"\"Declare the elemental composition of the sample.\n\n The ionisation edges of the elements present in the current\n energy range will be added automatically.\n\n Parameters\n ----------\n elements : tuple of strings\n The symbol of the elements. Note this input must always be\n in the form of a tuple. Meaning: add_elements(('C',)) will\n work, while add_elements(('C')) will NOT work.\n include_pre_edges : bool\n If True, the ionization edges with an onset below the lower\n energy limit of the SI will be included\n\n Examples\n --------\n\n >>> s = hs.signals.EELSSpectrum(np.arange(1024))\n >>> s.add_elements(('C', 'O'))\n\n Raises\n ------\n ValueError\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a tuple. 
For example, \"\n \"if `s` is the variable containing this EELS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n\n for element in elements:\n if isinstance(element, bytes):\n element = element.decode()\n if element in elements_db:\n self.elements.add(element)\n else:\n raise ValueError(\n \"%s is not a valid symbol of a chemical element\" % element\n )\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n self.metadata.Sample.elements = list(self.elements)\n if self.elements:\n self.generate_subshells(include_pre_edges)\n\n def generate_subshells(self, include_pre_edges=False):\n \"\"\"Calculate the subshells for the current energy range for the\n elements present in self.elements\n\n Parameters\n ----------\n include_pre_edges : bool\n If True, the ionization edges with an onset below the lower\n energy limit of the SI will be included\n\n \"\"\"\n Eaxis = self.axes_manager.signal_axes[0].axis\n if not include_pre_edges:\n start_energy = Eaxis[0]\n else:\n start_energy = 0.0\n end_energy = Eaxis[-1]\n for element in self.elements:\n e_shells = list()\n for shell in elements_db[element][\"Atomic_properties\"][\"Binding_energies\"]:\n if shell[-1] != \"a\":\n energy = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ][shell][\"onset_energy (eV)\"]\n if start_energy <= energy <= end_energy:\n subshell = \"%s_%s\" % (element, shell)\n if subshell not in self.subshells:\n self.subshells.add(\"%s_%s\" % (element, shell))\n e_shells.append(subshell)\n\n def edges_at_energy(\n self,\n energy=\"interactive\",\n width=10,\n only_major=False,\n order=\"closest\",\n display=True,\n toolkit=None,\n ):\n \"\"\"Show EELS edges according to an energy range selected from the\n spectrum or within a provided energy window\n\n Parameters\n ----------\n energy : 'interactive' or float\n If it is 'interactive', a table with edges are shown and it depends\n on the energy range selected in the spectrum. If it is a float, a\n table with edges are shown and it depends on the energy window\n defined by energy +/- (width/2). The default is 'interactive'.\n width : float\n Width of window, in eV, around energy in which to find nearby\n energies, i.e. a value of 10 eV (the default) means to\n search +/- 5 eV. The default is 10.\n only_major : bool\n Whether to show only the major edges. The default is False.\n order : str\n Sort the edges, if 'closest', return in the order of energy\n difference, if 'ascending', return in ascending order, similarly\n for 'descending'. The default is 'closest'.\n\n Returns\n -------\n An interactive widget if energy is 'interactive', or a html-format\n table or ASCII table, depends on the environment.\n \"\"\"\n\n if energy == \"interactive\":\n er = EdgesRange(self, interactive=True)\n return er.gui(display=display, toolkit=toolkit)\n else:\n self.print_edges_near_energy(energy, width, only_major, order)\n\n @staticmethod\n def print_edges_near_energy(\n energy=None, width=10, only_major=False, order=\"closest\", edges=None\n ):\n \"\"\"Find and print a table of edges near a given energy that are within\n the given energy window.\n\n Parameters\n ----------\n energy : float\n Energy to search, in eV\n width : float\n Width of window, in eV, around energy in which to find nearby\n energies, i.e. a value of 10 eV (the default) means to\n search +/- 5 eV. The default is 10.\n only_major : bool\n Whether to show only the major edges. 
The default is False.\n order : str\n Sort the edges, if 'closest', return in the order of energy\n difference, if 'ascending', return in ascending order, similarly\n for 'descending'. The default is 'closest'.\n edges : iterable\n A sequence of edges, if provided, it overrides energy, width,\n only_major and order.\n\n Returns\n -------\n A PrettyText object where its representation is ASCII in terminal and\n html-formatted in Jupyter notebook\n \"\"\"\n\n if edges is None and energy is not None:\n edges = get_edges_near_energy(\n energy=energy, width=width, only_major=only_major, order=order\n )\n elif edges is None and energy is None:\n raise ValueError(\"Either energy or edges should be provided.\")\n\n table = PrettyTable()\n table.field_names = [\"edge\", \"onset energy (eV)\", \"relevance\", \"description\"]\n\n for edge in edges:\n element, shell = edge.split(\"_\")\n shell_dict = elements_db[element][\"Atomic_properties\"][\"Binding_energies\"][\n shell\n ]\n\n onset = shell_dict[\"onset_energy (eV)\"]\n relevance = shell_dict[\"relevance\"]\n threshold = shell_dict[\"threshold\"]\n edge_ = shell_dict[\"edge\"]\n description = threshold + \". \" * (threshold != \"\" and edge_ != \"\") + edge_\n\n table.add_row([edge, onset, relevance, description])\n\n # this ensures the html version try its best to mimick the ASCII one\n table.format = True\n\n display(table)\n\n def estimate_zero_loss_peak_centre(self, mask=None):\n \"\"\"Estimate the position of the zero-loss peak.\n\n This function provides just a coarse estimation of the position\n of the zero-loss peak centre by computing the position of the maximum\n of the spectra. For subpixel accuracy use `estimate_shift1D`.\n\n Parameters\n ----------\n mask : Signal1D of bool data type or bool array\n It must have signal_dimension = 0 and navigation_shape equal to the\n navigation shape of the current signal. Where mask is True the\n shift is not computed and set to nan.\n\n Returns\n -------\n zlpc : Signal1D subclass\n The estimated position of the maximum of the ZLP peak.\n\n Notes\n -----\n This function only works when the zero-loss peak is the most\n intense feature in the spectrum. If it is not in most cases\n the spectrum can be cropped to meet this criterion.\n Alternatively use `estimate_shift1D`.\n\n See Also\n --------\n estimate_shift1D, align_zero_loss_peak\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n self._check_navigation_mask(mask)\n if isinstance(mask, BaseSignal):\n mask = mask.data\n zlpc = self.valuemax(-1)\n if mask is not None:\n zlpc.data = np.where(mask, np.nan, zlpc.data)\n zlpc.set_signal_type(\"\")\n title = self.metadata.General.title\n zlpc.metadata.General.title = \"ZLP(%s)\" % title\n return zlpc\n\n def align_zero_loss_peak(\n self,\n calibrate=True,\n also_align=[],\n print_stats=True,\n subpixel=True,\n mask=None,\n signal_range=None,\n show_progressbar=None,\n crop=True,\n **kwargs,\n ):\n \"\"\"Align the zero-loss peak.\n\n This function first aligns the spectra using the result of\n `estimate_zero_loss_peak_centre` which finds the maximum in the\n given energy range, then if subpixel is True,\n proceeds to align with subpixel accuracy using `align1D`. 
The offset\n is automatically correct if `calibrate` is True.\n\n Parameters\n ----------\n calibrate : bool\n If True, set the offset of the spectral axis so that the\n zero-loss peak is at position zero.\n also_align : list of signals\n A list containing other spectra of identical dimensions to\n align using the shifts applied to the current spectrum.\n If `calibrate` is True, the calibration is also applied to\n the spectra in the list.\n print_stats : bool\n If True, print summary statistics of the ZLP maximum before\n the alignment.\n subpixel : bool\n If True, perform the alignment with subpixel accuracy\n using cross-correlation.\n mask : Signal1D of bool data type or bool array.\n It must have signal_dimension = 0 and navigation_shape equal to\n the shape of the current signal. Where mask is True the shift is\n not computed and set to nan.\n signal_range : tuple of integers, tuple of floats. Optional\n Will only search for the ZLP within the signal_range. If given\n in integers, the range will be in index values. If given floats,\n the range will be in spectrum values. Useful if there are features\n in the spectrum which are more intense than the ZLP.\n Default is searching in the whole signal. Note that ROIs can be used\n in place of a tuple.\n %s\n %s\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Examples\n --------\n >>> s_ll = hs.signals.EELSSpectrum(np.zeros(1000))\n >>> s_ll.data[100] = 100\n >>> s_ll.align_zero_loss_peak()\n\n Aligning both the lowloss signal and another signal\n\n >>> s = hs.signals.EELSSpectrum(np.range(1000))\n >>> s_ll.align_zero_loss_peak(also_align=[s])\n\n Aligning within a narrow range of the lowloss signal\n\n >>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))\n\n\n See Also\n --------\n estimate_zero_loss_peak_centre, align1D, estimate_shift1D.\n\n Notes\n -----\n Any extra keyword arguments are passed to `align1D`. 
For\n more information read its docstring.\n\n \"\"\"\n\n def substract_from_offset(value, signals):\n # Test that axes is uniform\n if not self.axes_manager[-1].is_uniform:\n raise NotImplementedError(\n \"Support for EELS signals with \"\n \"non-uniform signal axes is not yet implemented.\"\n )\n if isinstance(value, da.Array):\n value = value.compute()\n for signal in signals:\n signal.axes_manager[-1].offset -= value\n signal.events.data_changed.trigger(signal)\n\n def estimate_zero_loss_peak_centre(s, mask, signal_range):\n if signal_range:\n zlpc = s.isig[\n signal_range[0] : signal_range[1]\n ].estimate_zero_loss_peak_centre(mask=mask)\n else:\n zlpc = s.estimate_zero_loss_peak_centre(mask=mask)\n return zlpc\n\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n\n mean_ = np.nanmean(zlpc.data)\n\n if print_stats is True:\n print(underline(\"Initial ZLP position statistics\"))\n zlpc.print_summary_statistics()\n\n for signal in also_align + [self]:\n shift_array = -zlpc.data + mean_\n if zlpc._lazy:\n # We must compute right now because otherwise any changes to the\n # axes_manager of the signal later in the workflow may result in\n # a wrong shift_array\n shift_array = shift_array.compute()\n signal.shift1D(shift_array, crop=crop, show_progressbar=show_progressbar)\n\n if calibrate is True:\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n substract_from_offset(np.nanmean(zlpc.data), also_align + [self])\n\n if subpixel is False:\n return\n\n start, end = signal_range or (-3.0, 3.0)\n\n if calibrate is False:\n start += mean_\n end += mean_\n\n start = (\n start\n if start > self.axes_manager[-1].axis[0]\n else self.axes_manager[-1].axis[0]\n )\n end = (\n end\n if end < self.axes_manager[-1].axis[-1]\n else self.axes_manager[-1].axis[-1]\n )\n\n if self.axes_manager.navigation_size > 1:\n self.align1D(\n start,\n end,\n also_align=also_align,\n show_progressbar=show_progressbar,\n mask=mask,\n crop=crop,\n **kwargs,\n )\n if calibrate is True:\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n substract_from_offset(np.nanmean(zlpc.data), also_align + [self])\n\n align_zero_loss_peak.__doc__ %= (SHOW_PROGRESSBAR_ARG, CROP_PARAMETER_DOC)\n\n def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, signal_mask=None):\n \"\"\"Return boolean array with True value at the position of the zero\n loss peak. 
This mask can be used to restrict operation to the signal\n locations not marked as True (masked).\n\n Parameters\n ----------\n zero_loss_peak_mask_width: float\n Width of the zero loss peak mask.\n %s\n\n Returns\n -------\n bool array\n \"\"\"\n zlpc = self.estimate_zero_loss_peak_centre()\n (signal_axis,) = self.axes_manager[self.axes_manager.signal_axes]\n axis = signal_axis.axis\n mini_value = zlpc.data.mean() - zero_loss_peak_mask_width / 2\n maxi_value = zlpc.data.mean() + zero_loss_peak_mask_width / 2\n mask = np.logical_and(mini_value <= axis, axis <= maxi_value)\n if signal_mask is not None:\n signal_mask = np.logical_or(mask, signal_mask)\n else:\n signal_mask = mask\n return signal_mask\n\n get_zero_loss_peak_mask.__doc__ %= SIGNAL_MASK_ARG\n\n def spikes_diagnosis(\n self,\n signal_mask=None,\n navigation_mask=None,\n zero_loss_peak_mask_width=None,\n **kwargs,\n ):\n if zero_loss_peak_mask_width is not None:\n signal_mask = self.get_zero_loss_peak_mask(\n zero_loss_peak_mask_width, signal_mask\n )\n super().spikes_diagnosis(\n signal_mask=signal_mask, navigation_mask=None, **kwargs\n )\n\n spikes_diagnosis.__doc__ = SPIKES_DIAGNOSIS_DOCSTRING % MASK_ZERO_LOSS_PEAK_WIDTH\n\n def spikes_removal_tool(\n self,\n signal_mask=None,\n navigation_mask=None,\n threshold=\"auto\",\n zero_loss_peak_mask_width=None,\n interactive=True,\n display=True,\n toolkit=None,\n ):\n if zero_loss_peak_mask_width is not None:\n axis = self.axes_manager.signal_axes[0].axis\n # check the zero_loss is in the signal\n if (\n axis[0] - zero_loss_peak_mask_width / 2 > 0\n or axis[-1] + zero_loss_peak_mask_width / 2 < 0\n ):\n raise ValueError(\"The zero loss peak isn't in the energy range.\")\n signal_mask = self.get_zero_loss_peak_mask(\n zero_loss_peak_mask_width, signal_mask\n )\n super().spikes_removal_tool(\n signal_mask=signal_mask,\n navigation_mask=navigation_mask,\n threshold=threshold,\n interactive=interactive,\n display=display,\n toolkit=toolkit,\n )\n\n spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (\n SIGNAL_MASK_ARG,\n NAVIGATION_MASK_ARG,\n MASK_ZERO_LOSS_PEAK_WIDTH,\n DISPLAY_DT,\n TOOLKIT_DT,\n )\n\n def estimate_elastic_scattering_intensity(self, threshold, show_progressbar=None):\n \"\"\"Rough estimation of the elastic scattering intensity by\n truncation of an EELS low-loss spectrum.\n\n Parameters\n ----------\n threshold : {Signal1D, float, int}\n Truncation energy to estimate the intensity of the elastic\n scattering. The threshold can be provided as a signal of the same\n dimension as the input spectrum navigation space containing the\n threshold value in the energy units. Alternatively a constant\n threshold can be specified in energy/index units by passing\n float/int.\n %s\n\n Returns\n -------\n I0: Signal1D\n The elastic scattering intensity.\n\n See Also\n --------\n estimate_elastic_scattering_threshold\n\n \"\"\"\n # TODO: Write unit tests\n self._check_signal_dimension_equals_one()\n\n if show_progressbar is None:\n show_progressbar = hs.preferences.General.show_progressbar\n\n if isinstance(threshold, numbers.Number):\n I0 = self.isig[:threshold].integrate1D(-1)\n else:\n ax = self.axes_manager.signal_axes[0]\n # I0 = self._get_navigation_signal()\n # I0 = I0.transpose(signal_axes=[])\n threshold = threshold.transpose(signal_axes=[])\n binned = ax.is_binned\n\n def estimating_function(data, threshold=None):\n if np.isnan(threshold):\n return np.nan\n else:\n # the object is just an array, so have to reimplement\n # integrate1D. However, we can make certain assumptions, for\n # example a 1D signal that is pretty much always binned. Should\n # probably be unified at some point\n ind = ax.value2index(threshold)\n data = data[:ind]\n if binned:\n return data.sum()\n else:\n from scipy.integrate import simps\n\n axis = ax.axis[:ind]\n return simps(y=data, x=axis)\n\n I0 = self.map(\n estimating_function,\n threshold=threshold,\n ragged=False,\n show_progressbar=show_progressbar,\n inplace=False,\n )\n I0.metadata.General.title = self.metadata.General.title + \" elastic intensity\"\n I0.set_signal_type(\"\")\n if self.tmp_parameters.has_item(\"filename\"):\n I0.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_elastic_intensity\"\n )\n I0.tmp_parameters.folder = self.tmp_parameters.folder\n I0.tmp_parameters.extension = self.tmp_parameters.extension\n return I0\n\n estimate_elastic_scattering_intensity.__doc__ %= SHOW_PROGRESSBAR_ARG\n\n def estimate_elastic_scattering_threshold(\n self, window=10.0, tol=None, window_length=5, polynomial_order=3, start=1.0\n ):\n \"\"\"Calculate the first inflexion point of the spectrum derivative\n within a window.\n\n This method assumes that the zero-loss peak is located at position zero\n in all the spectra. Currently it looks for an inflexion point, that can\n be a local maximum or minimum. Therefore, to estimate the elastic\n scattering threshold `start` + `window` must be less than the first\n maximum for all spectra (often the bulk plasmon maximum). If there is\n more than one inflexion point in the energy window, it selects the\n smoother one, which, often but not always, is a good choice in this\n case.\n\n Parameters\n ----------\n window : {None, float}\n If None, the search for the local inflexion point is performed\n using the full energy range. A positive float will restrict\n the search to the (0,window] energy window, where window is given\n in the axis units. If no inflexion point is found in this\n spectral range the window value is returned instead.\n tol : {None, float}\n The threshold tolerance for the derivative. If \"auto\" it is\n automatically calculated as the minimum value that guarantees\n finding an inflexion point in all the spectra in the given energy\n range.\n window_length : int\n If non zero, performs order-three Savitzky-Golay smoothing\n of the data to avoid falling into local minima caused by\n the noise. It must be an odd integer.\n polynomial_order : int\n Savitzky-Golay filter polynomial order.\n start : float\n Position from the zero-loss peak centre from where to start\n looking for the inflexion point.\n\n\n Returns\n -------\n\n threshold : Signal1D\n A Signal1D of the same dimension as the input spectrum\n navigation space containing the estimated threshold. Where the\n threshold couldn't be estimated the value is set to nan.\n\n See Also\n --------\n\n estimate_elastic_scattering_intensity, align_zero_loss_peak,\n find_peaks1D_ohaver, fourier_ratio_deconvolution.\n\n Notes\n -----\n\n The main purpose of this method is to be used as input for\n `estimate_elastic_scattering_intensity`. Indeed, for currently\n achievable energy resolutions, there is no such thing as an elastic\n scattering threshold. 
Therefore, please be aware of the limitations of\n this method when using it.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n # Create threshold with the same shape as the navigation dims.\n threshold = self._get_navigation_signal().transpose(signal_axes=0)\n\n # Progress Bar\n axis = self.axes_manager.signal_axes[0]\n min_index, max_index = axis.value_range_to_indices(start, start + window)\n if max_index < min_index + 10:\n raise ValueError(\"Please select a bigger window\")\n s = self.isig[min_index:max_index].deepcopy()\n if window_length:\n s.smooth_savitzky_golay(\n polynomial_order=polynomial_order,\n window_length=window_length,\n differential_order=1,\n )\n else:\n s = s.derivative(-1)\n if tol is None:\n tol = np.max(abs(s.data).min(axis.index_in_array))\n saxis = s.axes_manager[-1]\n inflexion = (abs(s.data) <= tol).argmax(saxis.index_in_array)\n if isinstance(inflexion, da.Array):\n inflexion = inflexion.compute()\n threshold.data[:] = saxis.index2value(inflexion)\n if isinstance(inflexion, np.ndarray):\n threshold.data[inflexion == 0] = np.nan\n else: # Single spectrum\n if inflexion == 0:\n threshold.data[:] = np.nan\n del s\n if np.isnan(threshold.data).any():\n _logger.warning(\n \"No inflexion point could be found in some positions \"\n \"that have been marked with nans.\"\n )\n # Create spectrum image, stop and return value\n threshold.metadata.General.title = (\n self.metadata.General.title + \" elastic scattering threshold\"\n )\n if self.tmp_parameters.has_item(\"filename\"):\n threshold.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_elastic_scattering_threshold\"\n )\n threshold.tmp_parameters.folder = self.tmp_parameters.folder\n threshold.tmp_parameters.extension = self.tmp_parameters.extension\n threshold.set_signal_type(\"\")\n return threshold\n\n def estimate_thickness(\n self,\n threshold=None,\n zlp=None,\n density=None,\n mean_free_path=None,\n ):\n \"\"\"Estimates the thickness (relative and absolute)\n of a sample using the log-ratio method.\n\n The current EELS spectrum must be a low-loss spectrum containing\n the zero-loss peak. The hyperspectrum must be well calibrated\n and aligned. To obtain the thickness relative to the mean free path\n don't set the `density` and the `mean_free_path`.\n\n Parameters\n ----------\n threshold : {BaseSignal, float}, optional\n If the zero-loss-peak is not provided, use this energy threshold\n to roughly estimate its intensity by truncation.\n If the threshold is constant across the dataset use a float. Otherwise,\n provide a signal of\n the same dimension as the input spectrum navigation space\n containing the threshold value in the energy units.\n zlp : BaseSignal, optional\n If not None the zero-loss peak intensity is calculated from the ZLP\n spectrum supplied by integration.\n mean_free_path : float, optional\n The mean free path of the material in nanometers.\n If not provided, the thickness\n is given relative to the mean free path.\n density : float, optional\n The density of the material in g/cm**3. This is used to estimate the mean\n free path when the mean free path is not known and to perform the\n angular corrections.\n\n Returns\n -------\n s : BaseSignal\n The thickness relative to the MFP. It returns a Signal1D,\n Signal2D or a BaseSignal, depending on the current navigation\n dimensions.\n\n Notes\n -----\n For details see Egerton, R. Electron Energy-Loss Spectroscopy in the Electron\n Microscope. 
Springer-Verlag, 2011.\n \"\"\"\n axis = self.axes_manager.signal_axes[0]\n total_intensity = self.integrate1D(axis.index_in_array).data\n if threshold is None and zlp is None:\n raise ValueError(\n \"Please provide one of the following keywords: \" \"`threshold`, `zlp`\"\n )\n if zlp is not None:\n I0 = zlp.integrate1D(axis.index_in_array).data\n else:\n I0 = self.estimate_elastic_scattering_intensity(\n threshold=threshold,\n ).data\n\n t_over_lambda = np.log(total_intensity / I0)\n\n if density is not None:\n if self._are_microscope_parameters_missing():\n raise RuntimeError(\n \"Some microscope parameters are missing. Please use the \"\n \"`set_microscope_parameters()` method to set them. \"\n \"If you don't know them, don't set the `density` keyword.\"\n )\n else:\n md = self.metadata.Acquisition_instrument.TEM\n t_over_lambda *= iMFP_angular_correction(\n beam_energy=md.beam_energy,\n alpha=md.convergence_angle,\n beta=md.Detector.EELS.collection_angle,\n density=density,\n )\n if mean_free_path is None:\n mean_free_path = iMFP_Iakoubovskii(\n electron_energy=self.metadata.Acquisition_instrument.TEM.beam_energy,\n density=density,\n )\n _logger.info(f\"The estimated iMFP is {mean_free_path} nm\")\n else:\n _logger.warning(\n \"Computing the thickness without taking into account the effect of \"\n \"the limited collection angle, which usually leads to underestimating \"\n \"the thickness. To perform the angular corrections you must provide \"\n \"the density of the material.\"\n )\n\n s = self._get_navigation_signal(data=t_over_lambda)\n if mean_free_path is not None:\n s.data *= mean_free_path\n s.metadata.General.title = self.metadata.General.title + \" thickness (nm)\"\n s.metadata.Signal.quantity = \"thickness (nm)\"\n else:\n _logger.warning(\n \"Computing the relative thickness. To compute the absolute \"\n \"thickness provide the `mean_free_path` and/or the `density`\"\n )\n s.metadata.General.title = (\n self.metadata.General.title + \" $\\\\frac{t}{\\\\lambda}$\"\n )\n s.metadata.Signal.quantity = \"$\\\\frac{t}{\\\\lambda}$\"\n if self.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename = self.tmp_parameters.filename + \"_thickness\"\n s.tmp_parameters.folder = self.tmp_parameters.folder\n s.tmp_parameters.extension = self.tmp_parameters.extension\n s = s.transpose(signal_axes=[])\n s.set_signal_type(\"\")\n return s\n\n def fourier_log_deconvolution(self, zlp, add_zlp=False, crop=False):\n \"\"\"Performs Fourier-log deconvolution.\n\n Parameters\n ----------\n zlp : EELSSpectrum\n The corresponding zero-loss peak.\n\n add_zlp : bool\n If True, adds the ZLP to the deconvolved spectrum\n crop : bool\n If True, crop the spectrum to leave out the channels that\n have been modified to decay smoothly to zero at the sides\n of the spectrum.\n\n Returns\n -------\n An EELSSpectrum containing the current data deconvolved.\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details see: Egerton, R. Electron Energy-Loss\n Spectroscopy in the Electron Microscope. 
Springer-Verlag, 2011.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes\"\n )\n s = self.deepcopy()\n zlp_size = zlp.axes_manager.signal_axes[0].size\n self_size = self.axes_manager.signal_axes[0].size\n tapped_channels = s.hanning_taper()\n # Conservative new size to solve the wrap-around problem\n size = zlp_size + self_size - 1\n # Calculate optimal FFT padding for performance\n complex_result = zlp.data.dtype.kind == \"c\" or s.data.dtype.kind == \"c\"\n size = optimal_fft_size(size, not complex_result)\n\n axis = self.axes_manager.signal_axes[0]\n\n z = np.fft.rfft(zlp.data, n=size, axis=axis.index_in_array)\n j = np.fft.rfft(s.data, n=size, axis=axis.index_in_array)\n if self._lazy or zlp._lazy:\n j1 = z * da.log(j / z).map_blocks(np.nan_to_num)\n else:\n j1 = z * np.nan_to_num(np.log(j / z))\n sdata = np.fft.irfft(j1, axis=axis.index_in_array)\n\n s.data = sdata[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, self_size)),\n ]\n )\n ]\n if add_zlp is True:\n if self_size >= zlp_size:\n if self._lazy:\n _slices_before = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, zlp_size)),\n ]\n )\n _slices_after = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(zlp_size, None)),\n ]\n )\n s.data = da.stack(\n (s.data[_slices_before] + zlp.data, s.data[_slices_after]),\n axis=axis.index_in_array,\n )\n else:\n s.data[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, zlp_size)),\n ]\n )\n ] += zlp.data\n else:\n s.data += zlp.data[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, self_size)),\n ]\n )\n ]\n\n s.metadata.General.title = (\n s.metadata.General.title + \" after Fourier-log deconvolution\"\n )\n if s.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_after_fourier_log_deconvolution\"\n )\n if crop is True:\n s.crop(axis.index_in_axes_manager, None, int(-tapped_channels))\n return s\n\n def fourier_ratio_deconvolution(\n self,\n ll,\n fwhm=None,\n threshold=None,\n extrapolate_lowloss=True,\n extrapolate_coreloss=True,\n ):\n \"\"\"Performs Fourier-ratio deconvolution.\n\n The core-loss should have the background removed. To reduce the noise\n amplification the result is convolved with a Gaussian function.\n\n Parameters\n ----------\n ll: EELSSpectrum\n The corresponding low-loss (ll) EELSSpectrum.\n fwhm : float or None\n Full-width half-maximum of the Gaussian function by which\n the result of the deconvolution is convolved. It can be\n used to select the final SNR and spectral resolution. If\n None, the FWHM of the zero-loss peak of the low-loss is\n estimated and used.\n threshold : {None, float}\n Truncation energy to estimate the intensity of the\n elastic scattering. If None the threshold is taken as the\n first minimum after the ZLP centre.\n extrapolate_lowloss, extrapolate_coreloss : bool\n If True the signals are extrapolated using a power law,\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details see: Egerton, R. Electron Energy-Loss\n Spectroscopy in the Electron Microscope. 
Springer-Verlag, 2011.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n if not ll.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"The low-loss energy axis is non-uniform. \"\n \"This operation is not yet implemented for non-uniform energy axes\"\n )\n orig_cl_size = self.axes_manager.signal_axes[0].size\n\n if threshold is None:\n threshold = ll.estimate_elastic_scattering_threshold()\n\n if extrapolate_coreloss is True:\n cl = self.power_law_extrapolation(window_size=20, extrapolation_size=100)\n else:\n cl = self.deepcopy()\n\n if extrapolate_lowloss is True:\n ll = ll.power_law_extrapolation(window_size=100, extrapolation_size=100)\n else:\n ll = ll.deepcopy()\n\n ll.hanning_taper()\n cl.hanning_taper()\n\n ll_size = ll.axes_manager.signal_axes[0].size\n cl_size = self.axes_manager.signal_axes[0].size\n # Conservative new size to solve the wrap-around problem\n size = ll_size + cl_size - 1\n # Calculate the optimal FFT size\n size = optimal_fft_size(size)\n\n axis = ll.axes_manager.signal_axes[0]\n if fwhm is None:\n fwhm = float(\n ll.get_current_signal().estimate_peak_width()._get_current_data()\n )\n _logger.info(\"FWHM = %1.2f\" % fwhm)\n\n I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)\n I0 = I0.data\n if ll.axes_manager.navigation_size > 0:\n I0_shape = list(I0.shape)\n I0_shape.insert(axis.index_in_array, 1)\n I0 = I0.reshape(I0_shape)\n\n from hyperspy.components1d import Gaussian\n\n g = Gaussian()\n g.sigma.value = fwhm / 2.3548\n g.A.value = 1\n g.centre.value = 0\n zl = g.function(\n np.linspace(axis.offset, axis.offset + axis.scale * (size - 1), size)\n )\n z = np.fft.rfft(zl)\n jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array)\n jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array)\n zshape = [\n 1,\n ] * len(cl.data.shape)\n zshape[axis.index_in_array] = jk.shape[axis.index_in_array]\n cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl, axis=axis.index_in_array)\n cl.data *= I0\n cl.crop(-1, None, int(orig_cl_size))\n cl.metadata.General.title = (\n self.metadata.General.title + \" after Fourier-ratio deconvolution\"\n )\n if cl.tmp_parameters.has_item(\"filename\"):\n cl.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"after_fourier_ratio_deconvolution\"\n )\n return cl\n\n def richardson_lucy_deconvolution(\n self, psf, iterations=15, show_progressbar=None, num_workers=None\n ):\n \"\"\"1D Richardson-Lucy Poissonian deconvolution of\n the spectrum by the given kernel.\n\n Parameters\n ----------\n psf : EELSSpectrum\n It must have the same signal dimension as the current\n spectrum and a spatial dimension of 0 or the same as the\n current spectrum.\n iterations : int\n Number of iterations of the deconvolution. Note that\n increasing the value will increase the noise amplification.\n %s\n %s\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details on the algorithm see Gloter, A., A. Douiri,\n M. Tence, and C. Colliex. “Improving Energy Resolution of\n EELS Spectra: An Alternative to the Monochromator Solution.”\n Ultramicroscopy 96, no. 
3–4 (September 2003): 385–400.\n\n \"\"\"\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n if show_progressbar is None:\n show_progressbar = hs.preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n psf_size = psf.axes_manager.signal_axes[0].size\n maxval = self.axes_manager.navigation_size\n show_progressbar = show_progressbar and (maxval > 0)\n\n def deconv_function(signal, kernel=None, iterations=15, psf_size=None):\n imax = kernel.argmax()\n result = np.array(signal).copy()\n mimax = psf_size - 1 - imax\n for _ in range(iterations):\n first = np.convolve(kernel, result)[imax : imax + psf_size]\n result *= np.convolve(kernel[::-1], signal / first)[\n mimax : mimax + psf_size\n ]\n return result\n\n ds = self.map(\n deconv_function,\n kernel=psf,\n iterations=iterations,\n psf_size=psf_size,\n show_progressbar=show_progressbar,\n num_workers=num_workers,\n ragged=False,\n inplace=False,\n )\n\n ds.metadata.General.title += (\n \" after Richardson-Lucy deconvolution %i iterations\" % iterations\n )\n if ds.tmp_parameters.has_item(\"filename\"):\n ds.tmp_parameters.filename += \"_after_R-L_deconvolution_%iiter\" % iterations\n return ds\n\n richardson_lucy_deconvolution.__doc__ %= (SHOW_PROGRESSBAR_ARG, NUM_WORKERS_ARG)\n\n def _are_microscope_parameters_missing(self, ignore_parameters=[]):\n \"\"\"\n Check if the EELS parameters necessary to calculate the GOS\n are defined in metadata. If not, in interactive mode\n raises a UI item to fill the values.\n The `ignore_parameters` list can be used to ignore parameters.\n \"\"\"\n must_exist = (\n \"Acquisition_instrument.TEM.convergence_angle\",\n \"Acquisition_instrument.TEM.beam_energy\",\n \"Acquisition_instrument.TEM.Detector.EELS.collection_angle\",\n )\n missing_parameters = []\n for item in must_exist:\n exists = self.metadata.has_item(item)\n if exists is False and item.split(\".\")[-1] not in ignore_parameters:\n missing_parameters.append(item)\n if missing_parameters:\n _logger.info(\"Missing parameters {}\".format(missing_parameters))\n return True\n else:\n return False\n\n def set_microscope_parameters(\n self,\n beam_energy=None,\n convergence_angle=None,\n collection_angle=None,\n toolkit=None,\n display=True,\n ):\n if set((beam_energy, convergence_angle, collection_angle)) == {None}:\n tem_par = EELSTEMParametersUI(self)\n return tem_par.gui(toolkit=toolkit, display=display)\n mp = self.metadata\n if beam_energy is not None:\n mp.set_item(\"Acquisition_instrument.TEM.beam_energy\", beam_energy)\n if convergence_angle is not None:\n mp.set_item(\n \"Acquisition_instrument.TEM.convergence_angle\", convergence_angle\n )\n if collection_angle is not None:\n mp.set_item(\n \"Acquisition_instrument.TEM.Detector.EELS.collection_angle\",\n collection_angle,\n )\n\n set_microscope_parameters.__doc__ = \"\"\"\n Set the microscope parameters that are necessary to calculate\n the GOS.\n\n If not all of them are defined, in interactive mode\n raises a UI item to fill the values.\n\n beam_energy: float\n The energy of the electron beam in keV.\n convergence_angle : float\n The microscope convergence semi-angle in mrad.\n collection_angle : float\n The collection semi-angle in mrad.\n {}\n {}\n \"\"\".format(\n TOOLKIT_DT, DISPLAY_DT\n )\n\n def power_law_extrapolation(\n self, window_size=20, extrapolation_size=1024, add_noise=False, fix_neg_r=False\n ):\n \"\"\"\n Extrapolate the spectrum to the right 
using a powerlaw.\n\n Parameters\n ----------\n window_size : int\n The number of channels from the right side of the\n spectrum that are used to estimate the power law\n parameters.\n extrapolation_size : int\n Size of the extrapolation in number of channels\n add_noise : bool\n If True, add poissonian noise to the extrapolated spectrum.\n fix_neg_r : bool\n If True, the negative values for the \"components.PowerLaw\"\n parameter r will be flagged and the extrapolation will be\n done with a constant zero-value.\n\n Returns\n -------\n A new spectrum, with the extrapolation.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n axis = self.axes_manager.signal_axes[0]\n s = self.deepcopy()\n s.metadata.General.title += \" %i channels extrapolated\" % extrapolation_size\n if s.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename += (\n \"_%i_channels_extrapolated\" % extrapolation_size\n )\n new_shape = list(self.data.shape)\n new_shape[axis.index_in_array] += extrapolation_size\n if self._lazy:\n left_data = s.data\n right_shape = list(self.data.shape)\n right_shape[axis.index_in_array] = extrapolation_size\n right_chunks = list(self.data.chunks)\n right_chunks[axis.index_in_array] = (extrapolation_size,)\n right_data = da.zeros(\n shape=tuple(right_shape),\n chunks=tuple(right_chunks),\n dtype=self.data.dtype,\n )\n s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array)\n else:\n # just old code\n s.data = np.zeros(new_shape)\n s.data[..., : axis.size] = self.data\n s.get_dimensions_from_data()\n pl = PowerLaw()\n pl._axes_manager = self.axes_manager\n A, r = pl.estimate_parameters(\n s,\n axis.index2value(axis.size - window_size),\n axis.index2value(axis.size - 1),\n out=True,\n )\n if fix_neg_r is True:\n A = np.where(r <= 0, 0, A)\n # If the signal is binned we need to bin the extrapolated power law\n # what, in a first approximation, can be done by multiplying by the\n # axis step size.\n if self.axes_manager[-1].is_binned:\n factor = s.axes_manager[-1].scale\n else:\n factor = 1\n if self._lazy:\n # only need new axes if the navigation dimension is not 0\n if s.axes_manager.navigation_dimension:\n rightslice = (..., None)\n axisslice = (None, slice(axis.size, None))\n else:\n rightslice = (...,)\n axisslice = (slice(axis.size, None),)\n right_chunks[axis.index_in_array] = 1\n x = da.from_array(\n s.axes_manager.signal_axes[0].axis[axisslice],\n chunks=(extrapolation_size,),\n )\n A = A[rightslice]\n r = r[rightslice]\n right_data = factor * A * x ** (-r)\n s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array)\n else:\n s.data[..., axis.size :] = (\n factor\n * A[..., np.newaxis]\n * s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size :]\n ** (-r[..., np.newaxis])\n )\n return s\n\n def kramers_kronig_analysis(\n self, zlp=None, iterations=1, n=None, t=None, delta=0.5, full_output=False\n ):\n r\"\"\"\n Calculate the complex dielectric function from a single scattering\n distribution (SSD) using the Kramers-Kronig relations.\n\n It uses the FFT method as in [1]_. The SSD is an\n EELSSpectrum instance containing SSD low-loss EELS with no zero-loss\n peak. The internal loop is devised to approximately subtract the\n surface plasmon contribution supposing an unoxidized planar surface and\n neglecting coupling between the surfaces. 
This method does not account\n for retardation effects, instrumental broadening and surface plasmon\n excitation in particles.\n\n Note that either the refractive index or the thickness is required.\n If both are None or if both are provided an exception is raised.\n\n Parameters\n ----------\n zlp : {None, number, Signal1D}\n ZLP intensity. It is optional (can be None) if `t` is None and `n`\n is not None and the thickness estimation is not required. If `t`\n is not None, the ZLP is required to perform the normalization and\n to calculate the thickness.\n If the ZLP is the same for all spectra, the integral of the ZLP\n can be provided as a number. Otherwise, if the ZLP intensity is not\n the same for all spectra, it can be provided as i) a Signal1D\n of the same dimensions as the current signal containing the ZLP\n spectra for each location ii) a BaseSignal of signal dimension 0\n and navigation_dimension equal to the current signal containing the\n integrated ZLP intensity.\n iterations : int\n Number of the iterations for the internal loop to remove the\n surface plasmon contribution. If 1 the surface plasmon contribution\n is not estimated and subtracted (the default is 1).\n n : {None, float}\n The medium refractive index. Used for normalization of the\n SSD to obtain the energy loss function. If given the thickness\n is estimated and returned. It is only required when `t` is None.\n t : {None, number, Signal1D}\n The sample thickness in nm. Used for normalization of the SSD\n to obtain the energy loss function. It is only required when\n `n` is None. If the thickness is the same for all spectra it can be\n given by a number. Otherwise, it can be provided as a BaseSignal\n with signal dimension 0 and navigation_dimension equal to the\n current signal.\n delta : float\n A small number (0.1-0.5 eV) added to the energy axis in\n specific steps of the surface loss correction calculation to\n improve stability.\n full_output : bool\n If True, return a dictionary that contains the estimated\n thickness if `t` is None and the estimated surface plasmon\n excitation and the spectrum corrected from surface plasmon\n excitations if `iterations` > 1.\n\n Returns\n -------\n eps: DielectricFunction instance\n The complex dielectric function results,\n\n .. math::\n \\epsilon = \\epsilon_1 + i*\\epsilon_2,\n\n contained in a DielectricFunction instance.\n output: Dictionary (optional)\n A dictionary of optional outputs with the following keys\n\n * ``thickness``: the estimated thickness in nm calculated by\n normalization of the SSD (only when ``t`` is None)\n * ``surface plasmon estimation``: the estimated surface plasmon\n excitation (only if ``iterations`` > 1.)\n\n Raises\n ------\n ValueError\n If both `n` and `t` are undefined (None).\n AttributeError\n If the beam_energy or the collection semi-angle are not defined in\n metadata.\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n This method is based on Egerton's Matlab code [1]_ with a\n minor difference: the wrap-around problem when computing the FFTs is\n worked around by padding the signal instead of subtracting the\n reflected tail.\n\n .. 
[1] Ray Egerton, \"Electron Energy-Loss Spectroscopy in the Electron\n Microscope\", Springer-Verlag, 2011.\n\n \"\"\"\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n output = {}\n if iterations == 1:\n # In this case s.data is not modified so there is no need to make\n # a deep copy.\n s = self.isig[0.0:]\n else:\n s = self.isig[0.0:].deepcopy()\n\n sorig = self.isig[0.0:]\n # Avoid singularity at 0\n if s.axes_manager.signal_axes[0].axis[0] == 0:\n s = s.isig[1:]\n sorig = self.isig[1:]\n\n # Constants and units\n me = constants.value(\"electron mass energy equivalent in MeV\") * 1e3 # keV\n\n # Mapped parameters\n self._are_microscope_parameters_missing(ignore_parameters=[\"convergence_angle\"])\n e0 = s.metadata.Acquisition_instrument.TEM.beam_energy\n beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle\n\n axis = s.axes_manager.signal_axes[0]\n eaxis = axis.axis.copy()\n\n if isinstance(zlp, hyperspy.signal.BaseSignal):\n if (\n zlp.axes_manager.navigation_dimension\n == self.axes_manager.navigation_dimension\n ):\n if zlp.axes_manager.signal_dimension == 0:\n i0 = zlp.data\n else:\n i0 = zlp.integrate1D(axis.index_in_axes_manager).data\n else:\n raise ValueError(\n \"The ZLP signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n # The following prevents errors if the signal is a single spectrum\n if len(i0) != 1:\n i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1))\n elif isinstance(zlp, numbers.Number):\n i0 = zlp\n else:\n raise ValueError(\n \"The zero-loss peak input is not valid, it must be\\\n in the BaseSignal class or a Number.\"\n )\n\n if isinstance(t, hyperspy.signal.BaseSignal):\n if (\n t.axes_manager.navigation_dimension\n == self.axes_manager.navigation_dimension\n ) and (t.axes_manager.signal_dimension == 0):\n t = t.data\n t = t.reshape(np.insert(t.shape, axis.index_in_array, 1))\n else:\n raise ValueError(\n \"The thickness signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):\n raise ValueError(\n \"thickness must be a HyperSpy signal or a number,\" \" not a NumPy array.\"\n )\n\n # Slicer to get the signal data from 0 to axis.size\n slicer = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, axis.size)),\n ]\n )\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)\n\n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # Norm(SSD) = Imag(-1/epsilon) (Energy Loss Function, ELF)\n\n # We start by the \"angular corrections\"\n Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale\n if n is None and t is None:\n raise ValueError(\n \"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\"\n )\n elif n is not None and t is not None:\n raise ValueError(\n \"Please provide the refractive index OR the \"\n \"thickness information, not both\"\n )\n elif n is not None:\n # normalize using the refractive index.\n K = (Im / eaxis).sum(\n axis=axis.index_in_array, keepdims=True\n ) * axis.scale\n K = K / (np.pi / 2) / (1 - 1.0 / n**2)\n # K = (K / (np.pi / 2) / (1 - 1. 
/ n ** 2)).reshape(\n # np.insert(K.shape, axis.index_in_array, 1))\n # Calculate the thickness only if possible and required\n if zlp is not None and (full_output is True or iterations > 1):\n te = 332.5 * K * ke / i0\n if full_output is True:\n output[\"thickness\"] = te\n elif t is not None:\n if zlp is None:\n raise ValueError(\n \"The ZLP must be provided when the \"\n \"thickness is used for normalization.\"\n )\n # normalize using the thickness\n K = t * i0 / (332.5 * ke)\n te = t\n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = optimal_fft_size(2 * axis.size)\n q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize\n\n q[slicer] *= -1\n q = np.fft.fft(q, axis=axis.index_in_array)\n # Final touch, we have Re(1/eps)\n Re = q[slicer].real + 1\n\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re**2 + Im**2)\n e2 = Im / (Re**2 + Im**2)\n\n if iterations > 1 and zlp is not None:\n # Surface losses correction:\n # Calculates the surface ELF from a vacuum border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2**2) - Im\n adep = tgt / (eaxis + delta) * np.arctan(\n beta * tgt / axis.axis\n ) - beta / 1000.0 / (beta**2 + axis.axis**2.0 / tgt**2)\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale\n s.data = sorig.data - Srfint\n _logger.debug(\"Iteration number: %d / %d\", io + 1, iterations)\n if iterations == io + 1 and full_output is True:\n sp = sorig._deepcopy_with_new_data(Srfint)\n sp.metadata.General.title += (\n \" estimated surface plasmon excitation.\"\n )\n output[\"surface plasmon estimation\"] = sp\n del sp\n del Srfint\n\n eps = s._deepcopy_with_new_data(e1 + e2 * 1j)\n del s\n eps.set_signal_type(\"DielectricFunction\")\n eps.metadata.General.title = (\n self.metadata.General.title + \"dielectric function \"\n \"(from Kramers-Kronig analysis)\"\n )\n if eps.tmp_parameters.has_item(\"filename\"):\n eps.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_CDF_after_Kramers_Kronig_transform\"\n )\n if \"thickness\" in output:\n # As above,prevent errors if the signal is a single spectrum\n if len(te) != 1:\n te = te[self.axes_manager._get_data_slice([(axis.index_in_array, 0)])]\n thickness = eps._get_navigation_signal(data=te)\n thickness.metadata.General.title = (\n self.metadata.General.title + \" thickness \"\n \"(calculated using Kramers-Kronig analysis)\"\n )\n output[\"thickness\"] = thickness\n if full_output is False:\n return eps\n else:\n return eps, output\n\n def create_model(\n self,\n low_loss=None,\n auto_background=True,\n auto_add_edges=True,\n GOS=\"gosh\",\n gos_file_path=None,\n dictionary=None,\n ):\n \"\"\"Create a model for the current EELS data.\n\n Parameters\n ----------\n %s\n\n Returns\n -------\n model : 
:class:`~.models.eelsmodel.EELSModel` instance.\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n \"\"\"\n from exspy.models.eelsmodel import EELSModel\n\n if low_loss is not None and not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"Multiple scattering is not implemented for spectra with a \"\n \"non-uniform energy axis. To create a model that does not \"\n \"account for multiple-scattering do not set the `ll` keyword.\"\n )\n model = EELSModel(\n self,\n low_loss=low_loss,\n auto_background=auto_background,\n auto_add_edges=auto_add_edges,\n GOS=GOS,\n dictionary=dictionary,\n )\n return model\n\n create_model.__doc__ %= EELSMODEL_PARAMETERS\n\n def plot(self, plot_edges=False, only_edges=(\"Major\", \"Minor\"), **kwargs):\n \"\"\"\n Plot the EELS spectrum. Markers indicating the position of the\n EELS edges can be added.\n\n Parameters\n ----------\n plot_edges : {False, True, list of string or string}\n If True, draws on s.metadata.Sample.elements for edges.\n Alternatively, provide a string of a single edge, or an iterable\n containing a list of valid elements, EELS families or edges. For\n example, an element should be 'Zr', an element edge family should\n be 'Zr_L' or an EELS edge 'Zr_L3'.\n only_edges : tuple of string\n Either 'Major' or 'Minor'. Defaults to both.\n kwargs\n The extra keyword arguments for plot()\n \"\"\"\n\n super().plot(**kwargs)\n\n if plot_edges:\n # edges is a mapping {edge_name:edge_energy}\n edges = self._get_edges_to_plot(plot_edges, only_edges)\n self._plot_edge_labels(edges)\n\n self._plot.signal_plot.events.closed.connect(self._on_signal_plot_closing, [])\n\n def _on_signal_plot_closing(self):\n self._edge_markers = {\"lines\": None, \"texts\": None, \"names\": []}\n\n def _get_offsets_and_segments(self, edges):\n index = np.array([float(v) for v in edges.values()]) # dictionaries\n segments = np.empty((len(index), 2, 2))\n offsets = np.empty((len(index), 2))\n for i, ind in enumerate(index):\n segments[i] = [[ind, 1], [ind, 1.1]]\n offsets[i] = [ind, 1.1]\n return offsets, segments\n\n def _initialise_markers(self):\n self._edge_markers[\"lines\"] = Lines(\n segments=np.empty((0, 2, 2)),\n transform=\"relative\",\n color=\"black\",\n shift=np.array([0.0, 0.19]),\n )\n self._edge_markers[\"texts\"] = Texts(\n offsets=np.empty((0, 2)),\n texts=np.empty((0,)),\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.2,\n )\n for key in [\"lines\", \"texts\"]:\n self.add_marker(self._edge_markers[key], render_figure=False)\n\n def _plot_edge_labels(self, edges):\n \"\"\"\n Plot the EELS edge label (vertical line segment and text box) on\n the signal\n\n Parameters\n ----------\n edges : dictionary\n A dictionary with the labels as keys and their energies as values.\n For example, {'Fe_L2': 721.0, 'O_K': 532.0}\n\n \"\"\"\n # the object is needed to connect replot method when axes_manager\n # indices changed\n _ = EdgesRange(self, interactive=False)\n self._add_edge_labels(edges)\n\n def _get_edges_to_plot(self, plot_edges, only_edges):\n # get the dictionary of the edge to be shown\n extra_element_edge_family = []\n if plot_edges is True:\n try:\n elements = self.metadata.Sample.elements\n except AttributeError:\n raise ValueError(\n \"No elements defined. 
Add them with \"\n \"s.add_elements, or specify elements, edge \"\n \"families or edges directly\"\n )\n else:\n extra_element_edge_family.extend(np.atleast_1d(plot_edges))\n try:\n elements = self.metadata.Sample.elements\n except:\n elements = []\n\n element_edge_family = elements + extra_element_edge_family\n edges_dict = self._get_edges(element_edge_family, only_edges)\n\n return edges_dict\n\n def _get_edges(self, element_edge_family, only_edges):\n # get corresponding information depending on whether it is an element\n # a particular edge or a family of edge\n axis_min = self.axes_manager[-1].low_value\n axis_max = self.axes_manager[-1].high_value\n\n names_and_energies = {}\n shells = [\"K\", \"L\", \"M\", \"N\", \"O\"]\n\n errmsg = \"Edge family '{}' is not supported. Supported edge family \" \"is {}.\"\n for member in element_edge_family:\n try:\n element, ss = member.split(\"_\")\n\n if len(ss) == 1:\n memtype = \"family\"\n if ss not in shells:\n raise AttributeError(errmsg.format(ss, shells))\n if len(ss) == 2:\n memtype = \"edge\"\n if ss[0] not in shells:\n raise AttributeError(errmsg.format(ss[0], shells))\n except ValueError:\n element = member\n ss = \"\"\n memtype = \"element\"\n\n try:\n Binding_energies = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ]\n except KeyError as err:\n raise ValueError(\"'{}' is not a valid element\".format(element)) from err\n\n for edge in Binding_energies.keys():\n relevance = Binding_energies[edge][\"relevance\"]\n energy = Binding_energies[edge][\"onset_energy (eV)\"]\n\n isInRel = relevance in only_edges\n isInRng = axis_min < energy < axis_max\n isSameFamily = ss in edge\n\n if memtype == \"element\":\n flag = isInRel & isInRng\n edge_key = element + \"_\" + edge\n elif memtype == \"edge\":\n flag = isInRng & (edge == ss)\n edge_key = member\n elif memtype == \"family\":\n flag = isInRel & isInRng & isSameFamily\n edge_key = element + \"_\" + edge\n\n if flag:\n names_and_energies[edge_key] = energy\n\n return names_and_energies\n\n def _remove_edge_labels(self, edge_names=None, render_figure=True):\n \"\"\"\n Remove EELS edges markers to the signal\n\n Parameters\n ----------\n edge_names : str, list of str or None\n The string must be the name of edges, e. g. 'Fe_L2'.\n If ``None`` (default), remove all edges.\n render_figure : bool\n If True, render the figure after adding the markers\n \"\"\"\n if edge_names is None:\n edge_names = self._edge_markers[\"names\"]\n if isinstance(edge_names, set):\n # convert to list to find the index\n edge_names = list(edge_names)\n if not isinstance(edge_names, (list, tuple, np.ndarray)):\n edge_names = [edge_names]\n\n ind = np.where(np.isin(self._edge_markers[\"names\"], edge_names))\n\n if self._edge_markers[\"lines\"] is not None:\n self._edge_markers[\"lines\"].remove_items(ind)\n if self._edge_markers[\"texts\"] is not None:\n self._edge_markers[\"texts\"].remove_items(ind)\n if self._edge_markers[\"names\"] is not []:\n self._edge_markers[\"names\"] = np.delete(self._edge_markers[\"names\"], ind)\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_edge_labels(self, edges, render_figure=True):\n \"\"\"\n Add EELS edges markers to the signal\n\n Parameters\n ----------\n edge_name : dictionary or set\n If dictionary must be the name of edge as key and energy as values,\n e.g. {'Cr_L2': 584.0}. If list or set, must the name of the edge,\n e.g. 
set('Cr_L2', )\n render_figure : bool\n If True, render the figure after adding the markers\n \"\"\"\n if isinstance(edges, set):\n edges_dict = {}\n for edge in edges:\n element, ss = edge.split(\"_\")\n Binding_energies = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ]\n edges_dict[edge] = Binding_energies[ss][\"onset_energy (eV)\"]\n edges = edges_dict\n\n offsets, segments = self._get_offsets_and_segments(edges)\n names = list(edges.keys())\n\n self._edge_markers[\"lines\"].add_items(segments=segments)\n self._edge_markers[\"lines\"].update()\n self._edge_markers[\"texts\"].add_items(offsets=offsets, texts=names)\n self._edge_markers[\"lines\"].update()\n self._edge_markers[\"names\"] = np.append(self._edge_markers[\"names\"], names)\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _get_complementary_edges(self, edges, only_major=False):\n \"\"\"\n Get other edges of the same element present within the energy\n range of the axis\n\n Parameters\n ----------\n edges : iterable\n A sequence of strings contains edges in the format of\n element_subshell for EELS. For example, ['Fe_L2', 'O_K']\n only_major : bool\n Whether to show only the major edges. The default is False.\n\n Returns\n -------\n complmt_edges : list\n A list containing all the complementary edges of the same element\n present within the energy range of the axis\n \"\"\"\n\n emin = self.axes_manager[-1].low_value\n emax = self.axes_manager[-1].high_value\n complmt_edges = []\n\n elements = set()\n for edge in edges:\n element, _ = edge.split(\"_\")\n elements.update([element])\n\n for element in elements:\n ss_info = elements_db[element][\"Atomic_properties\"][\"Binding_energies\"]\n\n for subshell in ss_info:\n sse = ss_info[subshell][\"onset_energy (eV)\"]\n ssr = ss_info[subshell][\"relevance\"]\n\n if only_major:\n if ssr != \"Major\":\n continue\n\n edge = element + \"_\" + subshell\n if (\n (emin <= sse <= emax)\n and (subshell[-1] != \"a\")\n and (edge not in edges)\n ):\n complmt_edges.append(edge)\n\n return complmt_edges\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape, scale=scale\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n mdeels = m.metadata\n m.get_dimensions_from_data()\n if m.metadata.get_item(\"Acquisition_instrument.TEM.Detector.EELS\"):\n mdeels = m.metadata.Acquisition_instrument.TEM.Detector.EELS\n if \"dwell_time\" in mdeels:\n mdeels.dwell_time *= time_factor\n if \"exposure\" in mdeels:\n mdeels.exposure *= time_factor\n else:\n _logger.info(\n \"No dwell_time could be found in the metadata so \"\n \"this has not been updated.\"\n )\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = hyperspy.signal.BaseSignal.rebin.__doc__\n\n def vacuum_mask(\n self, threshold=10.0, start_energy=None, closing=True, opening=False\n ):\n \"\"\"\n Generate mask of the vacuum region\n\n Parameters\n ----------\n threshold: float\n For a given navigation coordinate, mean value in the energy axis\n below which the pixel is considered as vacuum.\n start_energy: float, None\n Minimum energy included in the calculation of the mean intensity.\n If None, consider only the last quarter of the spectrum to\n calculate the mask.\n closing: 
bool\n If True, a morphological closing is applied to the mask.\n opening: bool\n If True, a morphological opening is applied to the mask.\n\n Returns\n -------\n mask: signal\n The mask of the vacuum region.\n \"\"\"\n if self.axes_manager.navigation_dimension == 0:\n raise RuntimeError(\n \"Navigation dimension must be higher than 0 \"\n \"to estimate a vacuum mask.\"\n )\n signal_axis = self.axes_manager.signal_axes[0]\n if start_energy is None:\n start_energy = 0.75 * signal_axis.high_value\n\n mask = self.isig[start_energy:].mean(-1) <= threshold\n\n from scipy.ndimage import binary_dilation, binary_erosion\n\n if closing:\n mask.data = binary_dilation(mask.data, border_value=0)\n mask.data = binary_erosion(mask.data, border_value=1)\n if opening:\n mask.data = binary_erosion(mask.data, border_value=1)\n mask.data = binary_dilation(mask.data, border_value=0)\n return mask"
    }
]
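The kramers_kronig_analysis context above recovers Re(1/eps) from the normalized energy-loss function with two FFTs, padding to twice the spectrum length instead of subtracting a reflected tail. A minimal NumPy sketch of just that transform step, assuming a 1D, already-normalized energy-loss function `elf` and replacing `optimal_fft_size` with a plain factor of two:

import numpy as np

def kk_transform(elf):
    # Zero-pad to twice the signal length -- the wrap-around workaround
    # described in the Notes section of the docstring above.
    n = elf.size
    esize = 2 * n
    q = -2 * np.fft.fft(elf, esize).imag / esize
    q[:n] *= -1
    q = np.fft.fft(q)
    re = q[:n].real + 1  # Re(1/eps)
    # Combine Re(1/eps) and Im(-1/eps) into the complex dielectric function.
    e1 = re / (re**2 + elf**2)
    e2 = elf / (re**2 + elf**2)
    return e1 + 1j * e2

eps = kk_transform(np.abs(np.random.randn(512)))  # toy input, not real data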
import contextlib
import io
import numpy as np
import pooch
import pytest
import hyperspy.api as hs
from unittest import mock
from exspy.misc.elements import elements_db as elements
from hyperspy.decorators import lazifyTestClass
from exspy.misc.eels.gosh_gos import _GOSH_URL, _GOSH_KNOWN_HASH
from exspy.signals import EELSSpectrum
from exspy.models.eelsmodel import EELSModel
from hyperspy.components1d import PowerLaw
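The import block above pulls in pooch together with the GOSH URL and hash constants, which suggests the tests pre-fetch the generalized oscillator strength file. A hedged sketch of that fetch (the cache location and file name are pooch defaults, not taken from this row):

import pooch
from exspy.misc.eels.gosh_gos import _GOSH_URL, _GOSH_KNOWN_HASH

# Download the GOSH file once (or reuse the cached copy); pooch verifies
# the download against the known hash before returning the local path.
gosh_path = pooch.retrieve(url=_GOSH_URL, known_hash=_GOSH_KNOWN_HASH)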
18,768
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method):
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method):
s = EELSSpectrum(np.zeros(200))
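The gold next line above creates the bare spectrum that TestCreateEELSModel would then configure. A sketch of how such a spectrum is typically prepared before create_model can run; the microscope parameters below are illustrative values, not taken from this row:

import numpy as np
from exspy.signals import EELSSpectrum

s = EELSSpectrum(np.zeros(200))  # the gold next line
# create_model needs the beam energy and collection angle in the metadata;
# the numbers here are hypothetical.
s.set_microscope_parameters(
    beam_energy=100, convergence_angle=1.0, collection_angle=10.0
)
# auto_add_edges=False avoids needing s.metadata.Sample.elements or a GOS file.
m = s.create_model(auto_background=True, auto_add_edges=False)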
3
2023-10-28 20:04:10+00:00
24k
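Closing out this row: the vacuum_mask snippet in its context implements morphological closing and opening by chaining scipy.ndimage primitives. A self-contained illustration of those two steps on a toy boolean mask (the array is made up):

import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion

mask = np.array([[True, True, False],
                 [True, False, False],
                 [False, False, False]])
# Closing = dilation then erosion: fills small holes inside the vacuum region.
closed = binary_erosion(binary_dilation(mask, border_value=0), border_value=1)
# Opening = erosion then dilation: removes isolated vacuum pixels.
opened = binary_dilation(binary_erosion(mask, border_value=1), border_value=0)
print(closed.astype(int))
print(opened.astype(int))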
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n def avg(self):\n def global_avg(self):\n def max(self):\n def value(self):\n def __str__(self):\n def __init__(self, delimiter=\"\\t\"):\n def update(self, **kwargs):\n def __getattr__(self, attr):\n def __str__(self):\n def global_avg(self):\n def synchronize_between_processes(self):\n def add_meter(self, name, meter):\n def log_every(self, iterable, print_freq, header=None):\n def __init__(self, *args, **kwargs):\ndef compute_acc(logits, label, reduction='mean'):\ndef compute_n_params(model, return_str=True):\ndef setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef seed_worker(worker_id):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef save_on_master(*args, **kwargs):\ndef init_distributed_mode(args):\n MB = 1024.0 * 1024.0" }, { "identifier": "create_scheduler", "path": "scheduler/scheduler_factory.py", "snippet": "def create_scheduler(args, optimizer):\n num_epochs = args.epochs\n\n if getattr(args, 'lr_noise', None) is not None:\n lr_noise = getattr(args, 'lr_noise')\n if isinstance(lr_noise, (list, tuple)):\n noise_range = [n * num_epochs for n in lr_noise]\n if len(noise_range) == 1:\n noise_range = noise_range[0]\n else:\n noise_range = lr_noise * num_epochs\n else:\n noise_range = None\n\n lr_scheduler = None\n if args.sched == 'cosine':\n lr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'tanh':\n lr_scheduler = TanhLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'step':\n lr_scheduler = StepLRScheduler(\n optimizer,\n decay_t=args.decay_epochs,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n elif args.sched == 'plateau':\n mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'\n lr_scheduler = PlateauLRScheduler(\n optimizer,\n decay_rate=args.decay_rate,\n patience_t=args.patience_epochs,\n lr_min=args.min_lr,\n mode=mode,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cooldown_t=0,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n 
noise_seed=getattr(args, 'seed', 42),\n )\n\n return lr_scheduler, num_epochs" }, { "identifier": "create_optimizer", "path": "optim/optim_factory.py", "snippet": "def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip)\n weight_decay = 0.\n else:\n parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())]\n #model.parameters()\n\n # print(parameters)\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n if hasattr(args, 'opt_args') and args.opt_args is not None:\n opt_args.update(args.opt_args)\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp': \n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp': \n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'novograd':\n optimizer = NovoGrad(parameters, **opt_args)\n elif opt_lower == 'nvnovograd':\n optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, 
**opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "train", "path": "engine/train.py", "snippet": "def train(model, image_encoder, text_encoder, tokenizer, data_loader, optimizer, epoch, warmup_steps, device, scheduler, args, config, writer):\n clip_loss = ClipLoss()\n ce_loss = nn.CrossEntropyLoss(ignore_index=-1)\n \n if args.add_dataset:\n ASL_loss = AsymmetricLossAdd(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n else:\n ASL_loss = AsymmetricLoss(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n\n loss_m = AverageMeter()\n loss_clip_m = AverageMeter()\n loss_ce_m = AverageMeter()\n loss_ce_image_m = AverageMeter()\n loss_ce_text_m = AverageMeter()\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n\n model.train() \n image_encoder.train() \n text_encoder.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce_image', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n if args.use_entity_features:\n metric_logger.add_meter('loss_ce_text', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_clip', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.update(loss=1.0)\n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n header = 'Train Epoch: [{}]'.format(epoch)\n print_freq = 50 \n step_size = 100\n warmup_iterations = warmup_steps*step_size \n scalar_step = epoch*len(data_loader)\n num_batches_per_epoch = data_loader.num_batches\n sample_digits = math.ceil(math.log(data_loader.num_samples + 1, 10))\n\n for i, sample in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n if args.fourier:\n image = fourier_aug(sample['image'].to(device))\n else:\n image = sample['image'].to(device) \n label = sample['label'].long().to(device)\n\n if args.ignore_index:\n pass\n else:\n label[label==-1]=0\n entity = sample['entity']\n\n if args.add_dataset:\n dataset_label = sample['label_dataset']\n\n data_time_m.update(time.time() - end)\n\n optimizer.zero_grad()\n\n if args.add_dataset:\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 'Tuberculosis',\n 'Other diseases']\n\n else:\n\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged 
cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration']\n \n \n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n entity_features = get_text_features(text_encoder,entity,tokenizer,device,max_length=args.max_length)\n\n image_features,image_features_pool = image_encoder(image)\n if args.add_dataset:\n pred_class_image, moe_img = model(image_features,text_features,args)\n else:\n pred_class_image = model(image_features,text_features)\n\n\n if args.bce or args.asl:\n label = label.float()\n\n label_mask = (label != -1).squeeze()\n\n\n\n if args.add_dataset:\n loss_moe_img = moe_cl_loss(moe_img, dataset_label)\n\n if args.asl:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label_image.view(-1,1))\n elif args.bce:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = F.binary_cross_entropy(pred_class_image.view(-1,1),label_image.view(-1,1))\n else:\n if args.asl:\n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_image = F.binary_cross_entropy_with_logits(pred_class_image.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_image = ce_loss(pred_class_image.view(-1,2),label.view(-1)) \n\n if args.use_entity_features:\n if args.add_dataset:\n pred_class_text, moe_txt = model(entity_features.unsqueeze(1),text_features,args)\n loss_moe_txt = moe_cl_loss(moe_txt, dataset_label)\n else:\n pred_class_text = model(entity_features.unsqueeze(1),text_features)\n\n if args.add_dataset:\n if args.asl:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label_text.view(-1,1))\n \n elif args.bce:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = F.binary_cross_entropy(pred_class_text.view(-1,1),label_text.view(-1,1))\n\n else:\n if args.asl:\n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_text = F.binary_cross_entropy_with_logits(pred_class_text.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_text = ce_loss(pred_class_text.view(-1,2),label.view(-1))\n\n loss_ce = loss_ce_image + loss_ce_text\n if args.add_dataset:\n loss_moe = loss_moe_img + loss_moe_txt\n\n else:\n loss_ce = loss_ce_image\n if args.add_dataset:\n loss_moe = loss_moe_img\n\n\n loss_clip = clip_loss(image_features_pool,entity_features)\n if args.add_dataset:\n loss = loss_ce + loss_clip * args.loss_ratio + args.moe_ratio * loss_moe\n else:\n loss = loss_ce + loss_clip * args.loss_ratio\n \n\n loss.backward()\n optimizer.step() \n \n writer.add_scalar('loss/loss', loss, scalar_step)\n writer.add_scalar('loss/loss_ce', loss_ce, scalar_step)\n writer.add_scalar('loss/loss_ce_image', loss_ce_image, scalar_step)\n if args.use_entity_features:\n writer.add_scalar('loss/loss_ce_text', loss_ce_text, scalar_step)\n writer.add_scalar('loss/loss_clip', loss_clip, scalar_step)\n scalar_step += 1\n\n metric_logger.update(loss=loss.item())\n metric_logger.update(loss_ce=loss_ce.item())\n 
metric_logger.update(loss_ce_image=loss_ce_image.item())\n if args.use_entity_features:\n metric_logger.update(loss_ce_text=loss_ce_text.item())\n metric_logger.update(loss_clip=loss_clip.item())\n\n\n if epoch==0 and i%step_size==0 and i<=warmup_iterations: \n scheduler.step(i//step_size) \n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i + 1\n if i % 100 == 0:\n batch_size = len(image)\n num_samples = batch_count * batch_size\n samples_per_epoch = data_loader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n loss_m.update(loss.item(), batch_size)\n loss_clip_m.update(loss_clip.item(), batch_size)\n loss_ce_m.update(loss_ce.item(), batch_size)\n loss_ce_image_m.update(loss_ce_image.item(), batch_size)\n if args.use_entity_features:\n loss_ce_text_m.update(loss_ce_text.item(), batch_size)\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Loss_ce_text: {loss_ce_text_m.val:#.5g} ({loss_ce_text_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n else:\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger.global_avg()) \n return {k: \"{:.6f}\".format(meter.global_avg) for k, meter in metric_logger.meters.items()} #,loss_epoch.mean()" }, { "identifier": "valid_on_cheXpert", "path": "engine/train.py", "snippet": "def valid_on_cheXpert(model,image_encoder,text_encoder,tokenizer,data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = ['atelectasis', 'cardiomegaly', 'consolidation', 'edema', 'pleural effusion']\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n # initialize the ground truth and output tensor\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n \n # \n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n 
val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n \n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class=5)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "valid_on_chestxray14", "path": "engine/train.py", "snippet": "def valid_on_chestxray14(model, image_encoder, text_encoder, tokenizer, data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = [\"atelectasis\",\"cardiomegaly\",\"pleural effusion\",\"infiltration\",\"lung mass\",\"lung nodule\",\"pneumonia\",\"pneumothorax\",\"consolidation\",\"edema\",\"emphysema\",\"fibrosis\",\"pleural thicken\",\"hernia\"]\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n\n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n\n\n\n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class = 14)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "CLP_clinical", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n self.mlp_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n nn.GELU(),\n nn.Linear(embed_dim, embed_dim)\n )\n self.embed_dim = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.init_parameters()\n \n def 
init_parameters(self):\n nn.init.constant_(self.logit_scale, np.log(1 / 0.07))\n for m in self.mlp_embed:\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=self.embed_dim ** -0.5)\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n config = BertConfig.from_pretrained(bert_model_name, output_hidden_states=True)#bert-base-uncased\n model = AutoModel.from_pretrained(bert_model_name, config=config)#, return_dict=True)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n #input batch_size,token, return batch_size,dim \n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n last_hidden_state, pooler_output, hidden_states = output[0],output[1],output[2]\n encode_out = self.mlp_embed(pooler_output)\n # encode_out = pooler_output\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "ModelRes", "path": "models/clip_tqn.py", "snippet": "class ModelRes(nn.Module):\n def __init__(self, res_base_model):\n super(ModelRes, self).__init__()\n self.resnet_dict = {\"resnet50\": models.resnet50(pretrained=True)}\n self.resnet = self._get_res_basemodel(res_base_model)\n\n num_ftrs = int(self.resnet.fc.in_features)\n self.res_features = nn.Sequential(*list(self.resnet.children())[:-2])\n\n self.res_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.res_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_res_basemodel(self, res_model_name):\n try:\n res_model = self.resnet_dict[res_model_name]\n print(\"Image feature extractor:\", res_model_name)\n return res_model\n except:\n raise (\"Invalid model name. 
Check the config file and pass one of: resnet18 or resnet50\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n res_fea = self.res_features(img)\n\n res_fea = rearrange(res_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(res_fea,'b n d -> (b n) d')\n x = self.res_l1(h)\n x = F.relu(x)\n x = self.res_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "TQN_Model", "path": "models/clip_tqn.py", "snippet": "class TQN_Model(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n out = self.mlp_head(features) #(batch_size, query_num)\n return out" }, { "identifier": "TQN_Model_Add", "path": "models/clip_tqn.py", "snippet": "class TQN_Model_Add(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n gate_num: int = 3,\n high_dim: int = 32,\n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.decoderV1_1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1_2 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n 
return_intermediate=False)\n self.decoderV1_3 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n\n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_1 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_2 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_3 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n ) \n \n self.gate_head = nn.Sequential(\n nn.Linear(embed_dim, gate_num)\n )\n self.cl_head = nn.Sequential(\n nn.Linear(gate_num, high_dim)\n )\n\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features, args):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n gate_weight = self.gate_head(image_features_pool.squeeze(0)) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n \n \n if args.finetune:\n features_1 = self.decoderV1_1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_1 = self.dropout_feas(features_1).transpose(0,1) \n features_2 = self.decoderV1_2(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_2 = self.dropout_feas(features_2).transpose(0,1) \n features_3 = self.decoderV1_3(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_3 = self.dropout_feas(features_3).transpose(0,1) \n \n out_1 = torch.sigmoid(self.mlp_head_1(features_1))\n out_2 = torch.sigmoid(self.mlp_head_2(features_2))\n out_3 = torch.sigmoid(self.mlp_head_3(features_3))\n\n\n out = self.mlp_head(features)\n \n gate_weight = torch.softmax(gate_weight, dim=1)\n out = torch.sigmoid(out)\n\n high_dimension = self.cl_head(gate_weight)\n out_bias = gate_weight[:,0].unsqueeze(1).unsqueeze(2) * out_1 + gate_weight[:,1].unsqueeze(1).unsqueeze(2) * out_2 + gate_weight[:,2].unsqueeze(1).unsqueeze(2) * out_3\n\n out = args.main_ratio * out + args.bias_ratio * out_bias\n\n return out, high_dimension" }, { "identifier": "ModelDense", "path": "models/clip_tqn.py", "snippet": "class ModelDense(nn.Module):\n def __init__(self, dense_base_model):\n super(ModelDense, self).__init__()\n \n self.densenet_dict = {\"densenet121\": models.densenet121(pretrained=True)}#,\n # \"densenet161\": models.densenet161(pretrained=True)}\n self.densenet = self._get_dense_basemodel(dense_base_model)\n num_ftrs = int(self.densenet.classifier.in_features)\n 
self.dense_features = self.densenet.features\n self.dense_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.dense_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_dense_basemodel(self, dense_base_model):\n try:\n dense_model = self.densenet_dict[dense_base_model]\n print(\"Image feature extractor:\", dense_base_model)\n return dense_model\n except:\n raise (\"Invalid model name. Check the config file and pass one of: densenet121 or densenet161\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n dense_fea = self.dense_features(img)#N, 1024, 7,7\n dense_fea = rearrange(dense_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(dense_fea,'b n d -> (b n) d')\n x = self.dense_l1(h)\n x = F.relu(x)\n x = self.dense_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "CLP_clinical2", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical2(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n model = AutoModel.from_pretrained(bert_model_name)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n encode_out = output.last_hidden_state[:,0,:]\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "BertTokenizer", "path": "models/tokenization_bert.py", "snippet": "class BertTokenizer(PreTrainedTokenizer):\n r\"\"\"\n Construct a BERT tokenizer. Based on WordPiece.\n This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.\n Users should refer to this superclass for more information regarding those methods.\n Args:\n vocab_file (:obj:`str`):\n File containing the vocabulary.\n do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to lowercase the input when tokenizing.\n do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to do basic tokenization before WordPiece.\n never_split (:obj:`Iterable`, `optional`):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n :obj:`do_basic_tokenize=True`\n unk_token (:obj:`str`, `optional`, defaults to :obj:`\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (:obj:`str`, `optional`, defaults to :obj:`\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n pad_token (:obj:`str`, `optional`, defaults to :obj:`\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n cls_token (:obj:`str`, `optional`, defaults to :obj:`\"[CLS]\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n mask_token (:obj:`str`, `optional`, defaults to :obj:`\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to tokenize Chinese characters.\n This should likely be deactivated for Japanese (see this `issue\n <https://github.com/huggingface/transformers/issues/328>`__).\n strip_accents: (:obj:`bool`, `optional`):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for :obj:`lowercase` (as in the original BERT).\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(\n self,\n vocab_file,\n do_lower_case=True,\n do_basic_tokenize=True,\n never_split=None,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=True,\n strip_accents=None,\n **kwargs\n ):\n super().__init__(\n do_lower_case=do_lower_case,\n do_basic_tokenize=do_basic_tokenize,\n never_split=never_split,\n unk_token=unk_token,\n sep_token=sep_token,\n pad_token=pad_token,\n cls_token=cls_token,\n mask_token=mask_token,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n **kwargs,\n )\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\n \"Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained \"\n \"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(vocab_file)\n )\n self.vocab = load_vocab(vocab_file)\n self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])\n self.do_basic_tokenize = do_basic_tokenize\n if do_basic_tokenize:\n self.basic_tokenizer = BasicTokenizer(\n do_lower_case=do_lower_case,\n never_split=never_split,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n )\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)\n\n @property\n def do_lower_case(self):\n return self.basic_tokenizer.do_lower_case\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n def get_vocab(self):\n return dict(self.vocab, **self.added_tokens_encoder)\n\n def _tokenize(self, text):\n split_tokens = []\n if self.do_basic_tokenize:\n for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):\n\n # If the token is part of the never_split set\n if token in self.basic_tokenizer.never_split:\n split_tokens.append(token)\n else:\n split_tokens += self.wordpiece_tokenizer.tokenize(token)\n else:\n split_tokens = self.wordpiece_tokenizer.tokenize(text)\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str) in an id using the vocab. \"\"\"\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n out_string = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def build_inputs_with_special_tokens(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A BERT sequence has the following format:\n - single sequence: ``[CLS] X ``\n - pair of sequences: ``[CLS] A [SEP] B [SEP]``\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` method.\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the token list is already formatted with special tokens for the model.\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\n \"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formatted with special tokens for the model.\"\n )\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is not None:\n return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1]\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence\n pair mask has the following format:\n ::\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given\n sequence(s).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n index = 0\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n else:\n vocab_file = (filename_prefix + \"-\" if filename_prefix else \"\") + save_directory\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(vocab_file)\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" }, { "identifier": "MIMIC_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class MIMIC_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,1:])#40 class for fine-grained query list\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), 
interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity'] # mistake: this is not a list yet\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Mergetrain_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Mergetrain_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,2:])#60 class for fine-grained query list\n self.label_dataset_list = np.asarray(data_info.iloc[:,1])\n\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n 
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n\n if self.label_dataset_list[index] == 0:\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity'] # mistake: this is not a list yet\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n # img = Image.open(img_path).convert('RGB') \n # image = self.transform(img)\n # return {\n # \"image\": image,\n # \"label\": class_label,\n # \"caption\": caption_list,\n # \"entity\": entity_details\n # }\n \n else:\n img_path = self.img_path_list[index]\n class_label = self.class_list[index] \n caption_list = ''\n head = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 
'Tuberculosis',\n 'Other diseases']\n index_positive = np.where(class_label == 1)\n entity = np.array(head)[index_positive]\n entity_details = ''\n for sub_entity in entity:\n entity_details = entity_details + sub_entity + ' [SEP] '\n\n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n label_dataset = self.label_dataset_list[index]\n\n return {\n \"image\": image,\n \"label\": class_label,\n \"label_dataset\": label_dataset,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Chestxray14_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Chestxray14_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,3:])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize(image_res, interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ])\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace('/mnt/petrelfs/zhangxiaoman/DATA/Chestxray/ChestXray8/','/remote-home/share/medical/public/ChestXray8/')\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" }, { "identifier": "CheXpert_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class CheXpert_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,[13,7,11,10,15]])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize([image_res,image_res], interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ]) \n \n def __getitem__(self, index):\n img_path = os.path.join('/remote-home/share/tianjiedai/',self.img_path_list[index])\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" } ]
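The dataset snippets above all assemble their text fields the same way: per-sentence entities are joined with ' [ENT] ' markers, each segment is terminated with ' [SEP] ', and the raw captions serve as a fallback when no entities were extracted. A minimal runnable sketch of that logic (the helper name and sample record are hypothetical, not part of the repository):

def build_text_fields(entities, captions):
    # Mirrors the __getitem__ logic of MIMIC_Dataset/Mergetrain_Dataset above.
    if entities:
        caption_list, entity_details = '', ''
        for entity in entities:
            sub = ''.join(' [ENT] ' + e['Entity'] for e in entity['entity'])
            entity_details += sub + ' [SEP] '
            caption_list += entity['caption'] + ' [SEP] '
        return caption_list, entity_details
    # No entities extracted: fall back to the raw captions for both fields.
    caption_list = ''.join(c + ' [SEP] ' for c in captions)
    return caption_list, caption_list

sample = [{'caption': 'No pleural effusion.', 'entity': [{'Entity': 'pleural effusion'}]}]
print(build_text_fields(sample, []))
# -> ('No pleural effusion. [SEP] ', ' [ENT] pleural effusion [SEP] ')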
import argparse import os import logging import yaml import numpy as np import random import time import datetime import json import math import torch import torch.nn as nn import torch.nn.functional as F import torch.backends.cudnn as cudnn import torch.distributed as dist import socket from pathlib import Path from functools import partial from sklearn.metrics import roc_auc_score from collections import OrderedDict from torch.utils.data import DataLoader from tensorboardX import SummaryWriter from transformers import AutoModel,BertConfig,AutoTokenizer from factory import utils from scheduler import create_scheduler from optim import create_optimizer from engine.train import train,valid_on_cheXpert,valid_on_chestxray14 from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2 from models.tokenization_bert import BertTokenizer from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset from io import BytesIO
17,454
#### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name) text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda() else: tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True) text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda() if args.bert_pretrained: checkpoint = torch.load(args.bert_pretrained, map_location='cpu') state_dict = checkpoint["state_dict"] text_encoder.load_state_dict(state_dict) print('Load pretrained bert success from: ',args.bert_pretrained) if args.freeze_bert: for param in text_encoder.parameters(): param.requires_grad = False if args.add_dataset: if 'lam' in config: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda() else: model = 
TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda() else: if 'lam' in config: model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda() else: model = TQN_Model(class_num = args.class_num).cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) model_without_ddp = model.module if args.finetune: image_encoder_without_ddp = image_encoder else: image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) image_encoder_without_ddp = image_encoder.module text_encoder_without_ddp = text_encoder arg_opt = utils.AttrDict(config['optimizer'])
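The cropped code above repeats one sampler/loader recipe for the train, validation, test, and CheXpert splits. A condensed sketch of that recurring pattern (the helper name is hypothetical; `seed_worker` stands in for the script's `utils.seed_worker`):

import torch
from torch.utils.data import DataLoader

def make_dist_loader(dataset, batch_size, rank, world_size,
                     num_workers=8, seed_worker=None):
    # Shard the dataset across ranks, then batch each shard, exactly as the
    # four DataLoader blocks above do.
    sampler = torch.utils.data.distributed.DistributedSampler(
        dataset, num_replicas=world_size, rank=rank, shuffle=True)
    loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers,
                        pin_memory=True, sampler=sampler, collate_fn=None,
                        worker_init_fn=seed_worker, drop_last=True)
    loader.num_samples = len(dataset)   # bookkeeping attributes set by the script
    loader.num_batches = len(loader)
    return loader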
# import ruamel.yaml as yaml def main(args, config): torch.cuda.current_device() torch.cuda._initialized = True print("Total CUDA devices: ", torch.cuda.device_count()) torch.set_default_tensor_type('torch.FloatTensor') utils.init_distributed_mode(args) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True start_epoch = 0 max_epoch = config['schedular']['epochs'] warmup_steps = config['schedular']['warmup_epochs'] num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_rank = global_rank print('sampler_rank',sampler_rank,'num_tasks',num_tasks) #### Dataset #### print("Creating dataset") if args.add_dataset == True: train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) else: train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) train_dataloader = DataLoader( train_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=train_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) train_dataloader.num_samples = len(train_dataset) train_dataloader.num_batches = len(train_dataloader) val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res']) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) val_dataloader =DataLoader( val_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=val_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) val_dataloader.num_samples = len(val_dataset) val_dataloader.num_batches = len(val_dataloader) test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res']) test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader =DataLoader( test_dataset, batch_size=config['batch_size'], num_workers=8, pin_memory=True, sampler=test_sampler, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader.num_samples = len(test_dataset) test_dataloader.num_batches = len(test_dataloader) test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res']) test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True) test_dataloader_chexpert =DataLoader( test_dataset_chexpert, batch_size=config['batch_size'], num_workers=4, pin_memory=True, sampler=test_sampler_chexpert, collate_fn=None, worker_init_fn=utils.seed_worker, drop_last=True, ) test_dataloader_chexpert.num_samples = len(test_dataset_chexpert) test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert) if args.image_encoder_name == 'resnet': image_encoder = ModelRes(res_base_model='resnet50').cuda() elif args.image_encoder_name == 'dense': image_encoder = ModelDense(dense_base_model = 'densenet121').cuda() if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT': tokenizer = BertTokenizer.from_pretrained(args.bert_model_name) text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda() 
else: tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True) text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda() if args.bert_pretrained: checkpoint = torch.load(args.bert_pretrained, map_location='cpu') state_dict = checkpoint["state_dict"] text_encoder.load_state_dict(state_dict) print('Load pretrained bert success from: ',args.bert_pretrained) if args.freeze_bert: for param in text_encoder.parameters(): param.requires_grad = False if args.add_dataset: if 'lam' in config: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda() else: model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda() else: if 'lam' in config: model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda() else: model = TQN_Model(class_num = args.class_num).cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) model_without_ddp = model.module if args.finetune: image_encoder_without_ddp = image_encoder else: image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False) image_encoder_without_ddp = image_encoder.module text_encoder_without_ddp = text_encoder arg_opt = utils.AttrDict(config['optimizer'])
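The tail of `all_code` wraps the query model (and, when not fine-tuning, the image encoder) in DistributedDataParallel while keeping unwrapped `.module` handles for the optimizer. A minimal sketch of that wrap/unwrap idiom, assuming the process group is already initialized:

import torch

def wrap_ddp(module, gpu):
    # Parallelize across GPUs but keep the raw module so the optimizer and
    # checkpoint code see the underlying parameters.
    ddp = torch.nn.parallel.DistributedDataParallel(
        module, device_ids=[gpu], find_unused_parameters=True,
        broadcast_buffers=False)
    return ddp, ddp.module

# model, model_without_ddp = wrap_ddp(model, args.gpu)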
optimizer = create_optimizer(arg_opt, model_without_ddp,image_encoder_without_ddp,text_encoder_without_ddp)
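The target line builds the optimizer from the YAML `optimizer` section after wrapping it in `utils.AttrDict`. The repository's `AttrDict` is not shown in this record; a common minimal implementation consistent with that usage (an assumption, not the repo's code) is:

class AttrDict(dict):
    # Dict whose keys are also readable as attributes, e.g. arg_opt.lr.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self

# arg_opt = AttrDict({'opt': 'adamw', 'lr': 1e-4})  # hypothetical field names
# arg_opt.lr  # -> 1e-4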
2
2023-10-30 00:24:16+00:00
24k
ifrit98/storage-subnet
neurons/miner.py
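The context below documents the storage subnet's commitment and Merkle helpers (`storage/shared/ecc.py`, `storage/shared/merkle.py`). A short usage sketch of those documented interfaces, with hypothetical chunk data:

from storage.shared.ecc import setup_CRS, ECCommitment, ecc_point_to_hex
from storage.shared.merkle import MerkleTree

g, h = setup_CRS(curve="P-256")            # shared reference points
committer = ECCommitment(g, h)

points = []
for chunk in (b"chunk-0", b"chunk-1"):     # hypothetical data chunks
    c, m_val, r = committer.commit(chunk + b"seed")
    assert committer.open(c, m_val, r)     # commitment round-trips
    points.append(ecc_point_to_hex(c))

tree = MerkleTree(hash_type="sha3_256")
tree.add_leaf(points, do_hash=True)        # hash each hex point into a leaf
tree.make_tree()
root = tree.get_merkle_root()
proof = tree.get_proof(0)                  # e.g. [{'right': '...'}]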
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "setup_CRS", "path": "storage/shared/ecc.py", "snippet": "def setup_CRS(curve=\"P-256\"):\n \"\"\"\n Generate a pair of random points to serve as a Common Reference String (CRS) for elliptic curve operations.\n\n The CRS is essential for various cryptographic protocols that rely on a shared reference\n between parties, typically for the purpose of ensuring consistent cryptographic operations.\n\n Parameters:\n - curve (str, optional): Name of the elliptic curve to use; defaults to \"P-256\".\n\n Returns:\n - tuple(ECC.EccPoint, ECC.EccPoint): A 2-tuple of ECC.EccPoint instances representing the base points (g, h).\n\n Raises:\n - ValueError: If the specified elliptic curve name is not recognized.\n \"\"\"\n curve_obj = ECC.generate(curve=curve)\n g = curve_obj.pointQ # Base point\n h = ECC.generate(curve=curve).pointQ # Another random point\n return g, h" }, { "identifier": "ECCommitment", "path": "storage/shared/ecc.py", "snippet": "class ECCommitment:\n \"\"\"\n Elliptic Curve based commitment scheme allowing one to commit to a chosen value while keeping it hidden to others.\n\n Attributes:\n g (ECC.EccPoint): The base point of the elliptic curve used as part of the commitment.\n h (ECC.EccPoint): Another random point on the elliptic curve used as part of the commitment.\n\n Methods:\n commit(m): Accepts a message, hashes it, and produces a commitment to the hashed message.\n open(c, m_val, r): Accepts a commitment, a hashed message, and a random value to verify the commitment.\n\n The `commit` method will print the commitment process, and the `open` method will print the verification process.\n \"\"\"\n\n def __init__(self, g, h, verbose=False):\n self.g = g # Base point of the curve\n self.h = h # Another random point on the curve\n self.verbose = verbose\n\n def commit(self, m): # AKA Seal.\n \"\"\"\n Create a cryptographic commitment to a message.\n\n The message is hashed, and the hash is used along with a random number to form the commitment\n using the public parameters g and h. 
The commitment can be verified with the `open` method.\n\n Parameters:\n - m (bytes | bytearray | object): The message to commit to.\n\n Returns:\n - tuple: A 3-tuple (commitment, hashed message value, random number used in the commitment).\n\n Side Effects:\n - This method will print the commitment details to the console.\n\n Raises:\n - Exception: If the commitment calculation fails.\n \"\"\"\n m_val = hash_data(m) # Compute hash of the data\n r = random.randint(1, 2**256)\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"Committing: Data = {m}\\nHashed Value = {m_val}\\nRandom Value = {r}\\nComputed Commitment = {c}\\n\"\n )\n return c, m_val, r\n\n def open(self, c, m_val, r):\n \"\"\"\n Verify a commitment using the original message hash and randomness.\n\n This method recomputes the commitment using the public parameters and compares it with\n the provided commitment to check its validity.\n\n Parameters:\n - c (ECC.EccPoint): The commitment point to verify.\n - m_val (int): The integer value of the hashed message used in the commitment.\n - r (int): The random number used in the commitment.\n\n Returns:\n - bool: True if the verification succeeds (commitment is valid), False otherwise.\n\n Side Effects:\n - This method will print the verification details to the console.\n\n Raises:\n - Exception: If the verification calculation fails.\n \"\"\"\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n computed_c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"\\nOpening: Hashed Value = {m_val}\\nRandom Value = {r}\\nRecomputed Commitment = {computed_c}\\nOriginal Commitment = {c}\"\n )\n return computed_c == c" }, { "identifier": "ecc_point_to_hex", "path": "storage/shared/ecc.py", "snippet": "def ecc_point_to_hex(point):\n \"\"\"\n Convert an elliptic curve point to a hexadecimal string.\n\n This encoding is typically used for compact representation or for preparing the data\n to be transmitted over protocols that may not support binary data.\n\n Parameters:\n - point (ECC.EccPoint): An ECC point to convert.\n\n Returns:\n - str: Hexadecimal string representing the elliptic curve point.\n\n Raises:\n - AttributeError: If the input is not a valid ECC point with accessible x and y coordinates.\n \"\"\"\n point_str = \"{},{}\".format(point.x, point.y)\n return binascii.hexlify(point_str.encode()).decode()" }, { "identifier": "hex_to_ecc_point", "path": "storage/shared/ecc.py", "snippet": "def hex_to_ecc_point(hex_str, curve):\n \"\"\"\n Convert a hexadecimal string back into an elliptic curve point.\n\n This function is typically used to deserialize an ECC point that has been transmitted or stored as a hex string.\n\n Parameters:\n - hex_str (str): The hex string representing an elliptic curve point.\n - curve (str): The name of the elliptic curve the point belongs to.\n\n Returns:\n - ECC.EccPoint: The elliptic curve point represented by the hex string.\n\n Raises:\n - ValueError: If the hex string is not properly formatted or does not represent a valid point on the specified curve.\n \"\"\"\n point_str = binascii.unhexlify(hex_str).decode()\n x, y = map(int, point_str.split(\",\"))\n return ECC.EccPoint(x, y, curve=curve)" }, { "identifier": "MerkleTree", "path": "storage/shared/merkle.py", "snippet": "class MerkleTree(object):\n \"\"\"\n Represents a Merkle Tree, a data structure used for efficiently summarizing and verifying the\n integrity of large sets of data. 
The Merkle Tree is a binary tree where each leaf node is the hash\n of a data block and every non-leaf node is the hash of its children nodes.\n\n Attributes:\n hash_function (callable): The hash function used for generating hashes of the blocks\n and non-leaf nodes in the Merkle Tree.\n leaves (list): A list where each element is a bytearray representing the hashed value of a leaf.\n levels (list of lists): A list of lists where each sublist represents a level of the tree, starting\n from the leaves up to the root.\n is_ready (bool): Indicates whether the tree has been fully constructed and is ready to provide\n the Merkle root and proofs.\n\n Methods:\n add_leaf(values, do_hash=False): Adds one or multiple leaves to the tree. If `do_hash` is True,\n it will hash the values before adding them as leaves.\n get_leaf(index): Retrieves the hexadecimal string representation of a leaf at the given index.\n get_leaf_count(): Returns the total number of leaves in the tree.\n get_tree_ready_state(): Checks if the tree has been fully constructed.\n make_tree(): Constructs the Merkle Tree from the current leaves. This method must be called\n after all leaves are added and before retrieving the Merkle root or proofs.\n get_merkle_root(): Retrieves the Merkle root as a hexadecimal string if the tree is ready.\n get_proof(index): Generates a proof of inclusion for the leaf at the given index. This proof\n consists of a list of sibling hashes that, when combined with the target leaf,\n can reproduce the Merkle root.\n update_leaf(index, new_value): Updates the value of the leaf at the given index with `new_value`\n and recalculates the hashes up the tree to reflect this change.\n serialize(): Converts the Merkle Tree into a JSON-formatted string for storage or transmission.\n deserialize(json_data, hash_type=\"sha3_256\"): Reconstructs the Merkle Tree from a JSON string,\n using the specified hash function.\n\n Raises:\n Exception: If the `hash_type` provided during initialization is not supported or recognized.\n\n Example:\n # Create a Merkle tree using the SHA3-256 hash function\n merkle_tree = MerkleTree(hash_type='sha3_256')\n\n # Add data blocks (as leaves) to the tree\n merkle_tree.add_leaf(['block1', 'block2', 'block3'], do_hash=True)\n\n # Construct the tree\n merkle_tree.make_tree()\n\n # Retrieve the Merkle root\n root = merkle_tree.get_merkle_root()\n\n # Get proof of inclusion for the first data block\n proof = merkle_tree.get_proof(0)\n\n # Update the value of the first leaf and reconstruct the tree\n merkle_tree.update_leaf(0, 'new_block1_hashed_value')\n merkle_tree.make_tree()\n\n # Serialize the tree for storage\n serialized_tree = merkle_tree.serialize()\n\n # Deserialize the tree for later use\n deserialized_tree = MerkleTree.deserialize(serialized_tree, hash_type='sha3_256')\n\n Note:\n The hash_function attribute is determined by the hash_type parameter provided at initialization.\n Only hash types supported by the `hashlib` library can be used. 
Attempting to use an unsupported\n hash type will result in an exception.\n \"\"\"\n\n def __init__(self, hash_type=\"sha3_256\"):\n hash_type = hash_type.lower()\n if hash_type in [\"sha3_256\"]:\n self.hash_function = getattr(hashlib, hash_type)\n else:\n raise Exception(\"`hash_type` {} not supported\".format(hash_type))\n\n self.reset_tree()\n\n def __eq__(self, other):\n if not isinstance(other, MerkleTree):\n return False\n return self.serialize() == other.serialize()\n\n def _to_hex(self, x):\n try: # python3\n return x.hex()\n except: # python2\n return binascii.hexlify(x)\n\n def reset_tree(self):\n self.leaves = list()\n self.levels = None\n self.is_ready = False\n\n def add_leaf(self, values, do_hash=False):\n self.is_ready = False\n # check if single leaf\n if not isinstance(values, tuple) and not isinstance(values, list):\n values = [values]\n for v in values:\n if do_hash:\n v = v.encode(\"utf-8\")\n v = self.hash_function(v).hexdigest()\n v = bytearray.fromhex(v)\n self.leaves.append(v)\n\n def get_leaf(self, index):\n return self._to_hex(self.leaves[index])\n\n def get_leaf_count(self):\n return len(self.leaves)\n\n def get_tree_ready_state(self):\n return self.is_ready\n\n def _calculate_next_level(self):\n solo_leave = None\n N = len(self.levels[0]) # number of leaves on the level\n if N % 2 == 1: # if odd number of leaves on the level\n solo_leave = self.levels[0][-1]\n N -= 1\n\n new_level = []\n for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):\n new_level.append(self.hash_function(l + r).digest())\n if solo_leave is not None:\n new_level.append(solo_leave)\n self.levels = [\n new_level,\n ] + self.levels # prepend new level\n\n def make_tree(self):\n \"\"\"\n Constructs the Merkle Tree from the leaves that have been added.\n\n This must be called after adding all the leaves and before calling\n get_merkle_root or get_proof to ensure the tree is constructed.\n \"\"\"\n self.is_ready = False\n if self.get_leaf_count() > 0:\n self.levels = [\n self.leaves,\n ]\n while len(self.levels[0]) > 1:\n self._calculate_next_level()\n self.is_ready = True\n\n def get_merkle_root(self):\n if self.is_ready:\n if self.levels is not None:\n return self._to_hex(self.levels[0][0])\n else:\n return None\n else:\n return None\n\n def get_proof(self, index):\n \"\"\"\n Generates the proof for the existence of a leaf at the specified index within the Merkle Tree.\n\n A Merkle proof is a collection of sibling hashes on the path from a leaf to the root of the tree.\n This proof can be used to independently verify that a leaf is indeed part of the Merkle tree without\n needing the entire tree. Each element of the proof shows the direction ('left' or 'right') and the\n corresponding hash that pairs with the path to the root.\n\n Parameters:\n index (int): The index of the target leaf for which to generate the Merkle proof. The index must\n correspond to the position of the leaf in the original list of leaves when the tree\n was constructed.\n\n Returns:\n list of dicts: A list where each dictionary contains a single key-value pair. The key is either\n 'left' or 'right', indicating the side of the sibling hash, and the value is a\n string representing the hexadecimal hash value of the sibling. 
If the tree is not\n ready or the index is out of bounds, None is returned.\n\n Raises:\n IndexError: If the index provided is not within the range of the leaves in the tree.\n ValueError: If the tree has not been constructed by calling `make_tree` method, or the index\n is not an integer.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree` and has been populated with leaves and made ready\n proof = merkle_tree.get_proof(2)\n print(proof) # Outputs something like [{'left': 'abcd...'}, {'right': 'ef01...'}]\n\n Note:\n The Merkle proof is only valid if the tree is in the ready state (`is_ready` attribute is True),\n which occurs after the `make_tree` method has been called. If the tree is not ready or the index\n is not valid, the method will return None.\n \"\"\"\n if self.levels is None:\n return None\n elif not self.is_ready or index > len(self.leaves) - 1 or index < 0:\n return None\n else:\n proof = []\n for x in range(len(self.levels) - 1, 0, -1):\n level_len = len(self.levels[x])\n if (index == level_len - 1) and (\n level_len % 2 == 1\n ): # skip if this is an odd end node\n index = int(index / 2.0)\n continue\n is_right_node = index % 2\n sibling_index = index - 1 if is_right_node else index + 1\n sibling_pos = \"left\" if is_right_node else \"right\"\n sibling_value = self._to_hex(self.levels[x][sibling_index])\n proof.append({sibling_pos: sibling_value})\n index = int(index / 2.0)\n return proof\n\n def update_leaf(self, index, new_value):\n \"\"\"\n Updates the value of a leaf at a given index in the Merkle Tree and recalculates the hashes along\n the path from the updated leaf to the root of the tree to reflect the change.\n\n This method allows the Merkle Tree to maintain integrity by ensuring that any updates to the leaf\n nodes are propagated upwards, resulting in a new Merkle root that represents the current state of\n the leaves.\n\n Parameters:\n index (int): The index of the leaf to update. The index is zero-based and must be less than\n the number of leaves in the tree.\n new_value (str): The new value in hexadecimal format to which the leaf should be updated. This\n value should be a valid hexadecimal string that represents the hashed data\n if hashing was applied to the leaves upon tree construction.\n\n Returns:\n None\n\n Raises:\n ValueError: If the tree is not ready for updates (i.e., `is_ready` is False), if the index is\n not an integer, if the new_value is not a hexadecimal string, or if the index is\n out of bounds (less than 0 or greater than or equal to the number of leaves).\n IndexError: If the index is out of the range of current leaves.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree`, populated with leaves and made ready.\n merkle_tree.update_leaf(0, 'a1b2c3d4e5f67890')\n # The leaf at index 0 is updated, and changes are propagated to the root.\n\n Note:\n The tree must have been constructed and be in a ready state before calling this method. 
If the\n tree has not been made by calling the `make_tree` method, or the index is invalid, this method\n will not perform an update and will return None.\n \"\"\"\n if not self.is_ready:\n return None\n new_value = bytearray.fromhex(new_value)\n self.levels[-1][index] = new_value\n for x in range(len(self.levels) - 1, 0, -1):\n parent_index = index // 2\n left_child = self.levels[x][parent_index * 2]\n try:\n right_child = self.levels[x][parent_index * 2 + 1]\n except IndexError:\n right_child = bytearray()\n self.levels[x - 1][parent_index] = self.hash_function(\n left_child + right_child\n ).digest()\n index = parent_index\n\n def serialize(self):\n \"\"\"\n Serializes the MerkleTree object into a JSON string.\n \"\"\"\n # Convert the bytearray leaves and levels to hex strings for serialization\n leaves = [self._to_hex(leaf) for leaf in self.leaves]\n levels = None\n if self.levels is not None:\n levels = []\n for level in self.levels:\n levels.append([self._to_hex(item) for item in level])\n\n # Construct a dictionary with the MerkleTree properties\n merkle_tree_data = {\n \"leaves\": leaves,\n \"levels\": levels,\n \"is_ready\": self.is_ready,\n }\n\n # Convert the dictionary to a JSON string\n return json.dumps(merkle_tree_data)\n\n @classmethod\n def deserialize(cls, json_data, hash_type=\"sha3_256\"):\n \"\"\"\n Deserializes the JSON string into a MerkleTree object.\n \"\"\"\n # Convert the JSON string back to a dictionary\n merkle_tree_data = json.loads(json_data)\n\n # Create a new MerkleTree object\n m_tree = cls(hash_type)\n\n # Convert the hex strings back to bytearrays and set the leaves and levels\n m_tree.leaves = [bytearray.fromhex(leaf) for leaf in merkle_tree_data[\"leaves\"]]\n if merkle_tree_data[\"levels\"] is not None:\n m_tree.levels = []\n for level in merkle_tree_data[\"levels\"]:\n m_tree.levels.append([bytearray.fromhex(item) for item in level])\n m_tree.is_ready = merkle_tree_data[\"is_ready\"]\n\n return m_tree" }, { "identifier": "b64_encode", "path": "storage/shared/utils.py", "snippet": "def b64_encode(data: Union[bytes, str, List[str], List[bytes], dict]) -> str:\n \"\"\"\n Encodes the given data into a base64 string. If the data is a list or dictionary of bytes, it converts\n the bytes into hexadecimal strings before encoding.\n\n Args:\n data (list or dict): The data to be base64 encoded. Can be a list of bytes or a dictionary with bytes values.\n\n Returns:\n str: The base64 encoded string of the input data.\n\n Raises:\n TypeError: If the input is not a list, dict, or bytes.\n \"\"\"\n if isinstance(data, bytes):\n data = data.hex()\n if isinstance(data, list) and len(data) and isinstance(data[0], bytes):\n data = [d.hex() for d in data]\n if isinstance(data, dict) and isinstance(data[list(data.keys())[0]], bytes):\n data = {k: v.hex() for k, v in data.items()}\n return base64.b64encode(json.dumps(data).encode()).decode(\"utf-8\")" }, { "identifier": "b64_decode", "path": "storage/shared/utils.py", "snippet": "def b64_decode(data: bytes, decode_hex: bool = False, encrypted: bool = False):\n \"\"\"\n Decodes a base64 string into a list or dictionary. If decode_hex is True, it converts any hexadecimal strings\n within the data back into bytes.\n\n Args:\n data (bytes or str): The base64 encoded data to be decoded.\n decode_hex (bool): A flag to indicate whether to decode hex strings into bytes. Defaults to False.\n\n Returns:\n list or dict: The decoded data. 
Returns a list if the original encoded data was a list, and a dict if it was a dict.\n\n Raises:\n ValueError: If the input is not properly base64 encoded or if hex decoding fails.\n \"\"\"\n data = data.decode(\"utf-8\") if isinstance(data, bytes) else data\n decoded_data = json.loads(\n base64.b64decode(data) if encrypted else base64.b64decode(data).decode(\"utf-8\")\n )\n if decode_hex:\n try:\n decoded_data = (\n [bytes.fromhex(d) for d in decoded_data]\n if isinstance(decoded_data, list)\n else {k: bytes.fromhex(v) for k, v in decoded_data.items()}\n )\n except:\n pass\n return decoded_data" }, { "identifier": "chunk_data", "path": "storage/shared/utils.py", "snippet": "def chunk_data(data: bytes, chunksize: int) -> List[bytes]:\n \"\"\"\n Generator function that chunks the given data into pieces of a specified size.\n\n Args:\n data (bytes): The binary data to be chunked.\n chunksize (int): The size of each chunk in bytes.\n\n Yields:\n bytes: A chunk of the data with the size equal to 'chunksize' or the remaining size of data.\n\n Raises:\n ValueError: If 'chunksize' is less than or equal to 0.\n \"\"\"\n for i in range(0, len(data), chunksize):\n yield data[i : i + chunksize]" }, { "identifier": "safe_key_search", "path": "storage/shared/utils.py", "snippet": "async def safe_key_search(database: aioredis.Redis, pattern: str) -> List[str]:\n \"\"\"\n Safely search for keys in the database that doesn't block.\n `scan_iter` uses cursor under the hood.\n \"\"\"\n return [key for key in await database.scan_iter(pattern)]" }, { "identifier": "run", "path": "storage/miner/run.py", "snippet": "def run(self):\n \"\"\"\n Initiates and manages the main loop for the miner on the Bittensor network.\n\n This function performs the following primary tasks:\n 1. Check for registration on the Bittensor network.\n 2. Attaches the miner's forward, blacklist, and priority functions to its axon.\n 3. Starts the miner's axon, making it active on the network.\n 4. Regularly updates the metagraph with the latest network state.\n 5. Optionally sets weights on the network, defining how much trust to assign to other nodes.\n 6. Handles graceful shutdown on keyboard interrupts and logs unforeseen errors.\n\n The miner continues its operations until `should_exit` is set to True or an external interruption occurs.\n During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its\n knowledge of the network (metagraph), and sets its weights. 
This process ensures the miner remains active\n and up-to-date with the network's latest state.\n\n Note:\n - The function leverages the global configurations set during the initialization of the miner.\n - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests.\n\n Raises:\n KeyboardInterrupt: If the miner is stopped by a manual interruption.\n Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis.\n \"\"\"\n block_handler_substrate = SubstrateInterface(\n ss58_format=bt.__ss58_format__,\n use_remote_preset=True,\n url=self.subtensor.chain_endpoint,\n type_registry=bt.__type_registry__,\n )\n\n netuid = self.config.netuid\n\n # --- Check for registration.\n if not self.subtensor.is_hotkey_registered(\n netuid=netuid,\n hotkey_ss58=self.wallet.hotkey.ss58_address,\n ):\n bt.logging.error(\n f\"Wallet: {self.wallet} is not registered on netuid {netuid}\"\n f\"Please register the hotkey using `btcli subnets register` before trying again\"\n )\n exit()\n\n tempo = block_handler_substrate.query(\n module=\"SubtensorModule\", storage_function=\"Tempo\", params=[netuid]\n ).value\n\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n should_retry = False\n\n def handler(obj, update_nr, subscription_id):\n current_block = obj[\"header\"][\"number\"]\n block_hash = block_handler_substrate.get_block_hash(current_block)\n bt.logging.debug(f\"New block #{current_block}\")\n\n bt.logging.debug(\n f\"Blocks since epoch: {(current_block + netuid + 1) % (tempo + 1)}\"\n )\n\n nonlocal last_extrinsic_hash\n nonlocal checked_extrinsics_count\n nonlocal should_retry\n\n if last_extrinsic_hash != None:\n try:\n receipt = block_handler_substrate.retrieve_extrinsic_by_hash(\n block_hash, last_extrinsic_hash\n )\n bt.logging.debug(\n f\"Last set-weights call: {'Success' if receipt.is_success else format('Failure, reason: %s', receipt.error_message['name'] if receipt.error_message != None else 'nil')}\"\n )\n\n should_retry = False\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n except Exception as e:\n checked_extrinsics_count += 1\n bt.logging.debug(f\"An error occurred, extrinsic not found in block.\")\n finally:\n if checked_extrinsics_count >= 20:\n should_retry = True\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n\n if ((current_block + netuid + 1) % (tempo + 1) == 0) or should_retry:\n bt.logging.info(\n f\"New epoch started, setting weights at block {current_block}\"\n )\n with self.subtensor.substrate as substrate:\n call = substrate.compose_call(\n call_module=\"SubtensorModule\",\n call_function=\"set_weights\",\n call_params={\n \"dests\": [self.my_subnet_uid],\n \"weights\": [65535],\n \"netuid\": netuid,\n \"version_key\": 1,\n },\n )\n\n # Period dictates how long the extrinsic will stay as part of waiting pool\n extrinsic = substrate.create_signed_extrinsic(\n call=call, keypair=self.wallet.hotkey, era={\"period\": 1000}\n )\n\n dry_run = runtime_call(\n substrate=substrate,\n api=\"TaggedTransactionQueue\",\n method=\"validate_transaction\",\n params=[\"InBlock\", extrinsic, block_hash],\n block_hash=block_hash,\n )\n bt.logging.debug(dry_run)\n\n response = substrate.submit_extrinsic(\n extrinsic,\n wait_for_inclusion=False,\n wait_for_finalization=False,\n )\n\n result_data = substrate.rpc_request(\"author_pendingExtrinsics\", [])\n for extrinsic_data in result_data[\"result\"]:\n extrinsic = substrate.runtime_config.create_scale_object(\n \"Extrinsic\", 
metadata=substrate.metadata\n )\n extrinsic.decode(\n ScaleBytes(extrinsic_data),\n check_remaining=substrate.config.get(\"strict_scale_decode\"),\n )\n\n if extrinsic.value[\"extrinsic_hash\"] == response.extrinsic_hash:\n bt.logging.debug(\n \"Weights transaction is in the pending transaction pool\"\n )\n\n last_extrinsic_hash = response.extrinsic_hash\n should_retry = False\n\n # --- Update the miner storage information periodically.\n if not should_retry:\n update_storage_stats(self)\n bt.logging.debug(\"Storage statistics updated...\")\n\n if self.should_exit:\n return True\n\n block_handler_substrate.subscribe_block_headers(handler)" }, { "identifier": "set_weights", "path": "storage/miner/set_weights.py", "snippet": "def set_weights_for_miner(\n subtensor: \"bt.subtensor\",\n netuid: int,\n uid: int,\n wallet: \"bt.wallet\",\n metagraph: \"bt.metagraph\",\n wandb_on: bool = False,\n tempo: int = 360,\n wait_for_inclusion: bool = False,\n wait_for_finalization: bool = False,\n) -> bool:" }, { "identifier": "compute_subsequent_commitment", "path": "storage/miner/utils.py", "snippet": "def compute_subsequent_commitment(data, previous_seed, new_seed, verbose=False):\n \"\"\"\n Computes a new commitment based on provided data and a change from an old seed to a new seed.\n This function is typically used in cryptographic operations to update commitments without\n altering the underlying data.\n\n Parameters:\n - data: The original data for which the commitment is being updated.\n - previous_seed: The seed used in the previous commitment.\n - new_seed: The seed to be used for the new commitment.\n - verbose (bool): If True, additional debug information will be printed. Defaults to False.\n\n Returns:\n - A tuple containing the new commitment and the proof of the old commitment.\n\n If verbose is set to True, debug information about the types and contents of the parameters\n will be printed to aid in debugging.\n \"\"\"\n if verbose:\n bt.logging.debug(\"IN COMPUTE SUBESEQUENT COMMITMENT\")\n bt.logging.debug(\"type of data :\", type(data))\n bt.logging.debug(\"type of prev_seed:\", type(previous_seed))\n bt.logging.debug(\"type of new_seed :\", type(new_seed))\n proof = hash_data(data + previous_seed)\n return hash_data(str(proof).encode(\"utf-8\") + new_seed), proof" }, { "identifier": "save_data_to_filesystem", "path": "storage/miner/utils.py", "snippet": "def save_data_to_filesystem(data, directory, filename):\n \"\"\"\n Saves data to the filesystem at the specified directory and filename. 
If the directory does\n not exist, it is created.\n\n Parameters:\n - data: The data to be saved.\n - directory (str): The directory path where the data should be saved.\n - filename (str): The name of the file to save the data in.\n\n Returns:\n - file_path (str): The full path to the saved file.\n\n This function is useful for persisting data to the disk.\n \"\"\"\n # Ensure the directory exists\n directory = os.path.expanduser(directory)\n os.makedirs(directory, exist_ok=True)\n file_path = os.path.join(directory, filename)\n with open(file_path, \"wb\") as file:\n file.write(data)\n return file_path" }, { "identifier": "load_from_filesystem", "path": "storage/miner/utils.py", "snippet": "def load_from_filesystem(filepath):\n \"\"\"\n Loads data from a file in the filesystem.\n\n Parameters:\n - filepath (str): The path to the file from which data is to be loaded.\n\n Returns:\n - data: The data read from the file.\n\n This function is a straightforward utility for reading binary data from a file.\n \"\"\"\n with open(os.path.expanduser(filepath), \"rb\") as file:\n data = file.read()\n return data" }, { "identifier": "commit_data_with_seed", "path": "storage/miner/utils.py", "snippet": "def commit_data_with_seed(committer, data_chunks, n_chunks, seed):\n \"\"\"\n Commits chunks of data with a seed using a Merkle tree structure to create a proof of\n integrity for each chunk. This function is used in environments where the integrity\n and order of data need to be verifiable.\n\n Parameters:\n - committer: The committing object, which should have a commit method.\n - data_chunks (list): A list of data chunks to be committed.\n - n_chunks (int): The number of chunks expected to be committed.\n - seed: A seed value that is combined with data chunks before commitment.\n\n Returns:\n - randomness (list): A list of randomness values associated with each data chunk's commitment.\n - chunks (list): The list of original data chunks that were committed.\n - points (list): A list of commitment points in hex format.\n - merkle_tree (MerkleTree): A Merkle tree constructed from the commitment points.\n\n This function handles the conversion of commitment points to hex format and adds them to the\n Merkle tree. 
The completed tree represents the combined commitments.\n \"\"\"\n merkle_tree = MerkleTree()\n\n # Commit each chunk of data\n randomness, chunks, points = [None] * n_chunks, [None] * n_chunks, [None] * n_chunks\n for index, chunk in enumerate(data_chunks):\n c, m_val, r = committer.commit(chunk + str(seed).encode())\n c_hex = ecc_point_to_hex(c)\n randomness[index] = r\n chunks[index] = chunk\n points[index] = c_hex\n merkle_tree.add_leaf(c_hex)\n\n # Create the tree from the leaves\n merkle_tree.make_tree()\n return randomness, chunks, points, merkle_tree" }, { "identifier": "init_wandb", "path": "storage/miner/utils.py", "snippet": "def init_wandb(self, reinit=False):\n \"\"\"Starts a new wandb run.\"\"\"\n tags = [\n self.wallet.hotkey.ss58_address,\n storage.__version__,\n str(storage.__spec_version__),\n f\"netuid_{self.metagraph.netuid}\",\n ]\n\n if self.config.mock:\n tags.append(\"mock\")\n\n wandb_config = {\n key: copy.deepcopy(self.config.get(key, None))\n for key in (\"neuron\", \"reward\", \"netuid\", \"wandb\")\n }\n\n if wandb_config[\"neuron\"] is not None:\n wandb_config[\"neuron\"].pop(\"full_path\", None)\n\n self.wandb = wandb.init(\n anonymous=\"allow\",\n reinit=reinit,\n project=self.config.wandb.project_name,\n entity=self.config.wandb.entity,\n config=wandb_config,\n mode=\"offline\" if self.config.wandb.offline else \"online\",\n dir=self.config.neuron.full_path\n if self.config.neuron is not None\n else \"wandb_logs\",\n tags=tags,\n notes=self.config.wandb.notes,\n )\n bt.logging.success(\n prefix=\"Started a new wandb run\",\n sufix=f\"<blue> {self.wandb.name} </blue>\",\n )" }, { "identifier": "get_directory_size", "path": "storage/miner/utils.py", "snippet": "def get_directory_size(path):\n \"\"\"\n Calculates the total size of files in a specified directory.\n\n This function traverses the directory at the given path, including all subdirectories, and sums up the size\n of each file to calculate the total directory size.\n\n Args:\n path (str): The file path of the directory whose size is to be calculated.\n\n Returns:\n int: The total size of the directory in bytes (B).\n\n Usage:\n directory_size_gb = get_directory_size('/path/to/directory')\n \"\"\"\n total_size = 0\n path = os.path.expanduser(path)\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n return total_size" }, { "identifier": "get_free_disk_space", "path": "storage/miner/utils.py", "snippet": "def get_free_disk_space(path=\".\"):\n \"\"\"\n Retrieves the free disk space for the drive containing the specified path.\n\n This function provides the free disk space of the drive on which the specified path resides.\n It's useful for understanding the storage capacity and usage of the system where the miner is running.\n\n Args:\n path (str): A file path on the drive whose free disk space is to be fetched. 
Typically, you can\n provide the root path ('/') to get the stats for the primary drive.\n\n Returns:\n int: The free space on the disk in bytes (B).\n\n Usage:\n free_disk_space_gb = get_free_disk_space('/')\n \"\"\"\n stats = get_disk_space_stats(path)\n free = stats.get(\"free_bytes\", 0)\n return free" }, { "identifier": "update_storage_stats", "path": "storage/miner/utils.py", "snippet": "def update_storage_stats(self):\n \"\"\"\n Updates the miner's storage statistics.\n\n This function updates the miner's storage statistics, including the free disk space, current storage usage,\n and percent disk usage. It's useful for understanding the storage capacity and usage of the system where\n the miner is running.\n \"\"\"\n\n self.free_memory = get_free_disk_space()\n bt.logging.info(f\"Free memory: {self.free_memory} bytes\")\n self.current_storage_usage = get_directory_size(self.config.database.directory)\n bt.logging.info(f\"Miner storage usage: {self.current_storage_usage} bytes\")\n self.percent_disk_usage = self.current_storage_usage / self.free_memory\n bt.logging.info(f\"Miner % disk usage : {100 * self.percent_disk_usage:.3f}%\")" }, { "identifier": "config", "path": "storage/miner/config.py", "snippet": "def config(cls):\n parser = argparse.ArgumentParser()\n bt.subtensor.add_args(parser)\n bt.logging.add_args(parser)\n bt.wallet.add_args(parser)\n bt.axon.add_args(parser)\n cls.add_args(parser)\n return bt.config(parser)" }, { "identifier": "check_config", "path": "storage/miner/config.py", "snippet": "def check_config(cls, config: \"bt.Config\"):\n r\"\"\"Checks/validates the config namespace object.\"\"\"\n bt.logging.check_config(config)\n\n if config.mock:\n config.wallet._mock = True\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n full_path = os.path.expanduser(\n \"{}/{}/{}/netuid{}/{}\".format(\n config.logging.logging_dir,\n config.wallet.name,\n config.wallet.hotkey,\n config.netuid,\n config.miner.name,\n )\n )\n log_path = os.path.join(full_path, \"logs\", timestamp)\n\n config.miner.log_path = os.path.expanduser(log_path)\n config.miner.full_path = os.path.expanduser(full_path)\n\n if not os.path.exists(config.miner.full_path):\n os.makedirs(config.miner.full_path, exist_ok=True)\n if not os.path.exists(config.miner.log_path):\n os.makedirs(config.miner.log_path, exist_ok=True)\n\n if not config.miner.dont_save_events:\n # Add custom event logger for the events.\n logger.level(\"EVENTS\", no=38, icon=\"📝\")\n logger.add(\n config.miner.full_path + \"/\" + \"EVENTS.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"EVENTS\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"INFO.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"INFO\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"DEBUG.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"DEBUG\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"TRACE.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"TRACE\",\n format=\"{time:YYYY-MM-DD at 
HH:mm:ss} | {level} | {message}\",\n )" }, { "identifier": "add_args", "path": "storage/miner/config.py", "snippet": "def add_args(cls, parser):\n parser.add_argument(\"--netuid\", type=int, default=21, help=\"The chain subnet uid.\")\n parser.add_argument(\"--test\", default=False, action=\"store_true\")\n parser.add_argument(\n \"--miner.name\",\n type=str,\n help=\"Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name. \",\n default=\"core_storage_miner\",\n )\n parser.add_argument(\n \"--miner.device\",\n type=str,\n help=\"Device to run the miner on.\",\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\"--miner.verbose\", default=False, action=\"store_true\")\n\n parser.add_argument(\n \"--database.host\", default=\"localhost\", help=\"The host of the redis database.\"\n )\n parser.add_argument(\n \"--database.port\",\n type=int,\n default=6379,\n help=\"The port of the redis database.\",\n )\n parser.add_argument(\n \"--database.index\",\n type=int,\n default=0,\n help=\"The index of the redis database.\",\n )\n parser.add_argument(\n \"--database.directory\",\n default=\"~/.data\",\n help=\"The directory to store data in.\",\n )\n\n # Run config.\n parser.add_argument(\n \"--miner.set_weights_wait_for_inclusion\",\n action=\"store_true\",\n help=\"Whether to wait for the set_weights extrinsic to enter a block\",\n default=False,\n )\n parser.add_argument(\n \"--miner.set_weights_wait_for_finalization\",\n action=\"store_true\",\n help=\"Whether to wait for the set_weights extrinsic to be finalized on the chain\",\n default=False,\n )\n parser.add_argument(\n \"--miner.seconds_to_wait_to_log_presence_message\",\n type=int,\n help=\"How many seconds to wait before logging a presence message.\",\n default=4,\n )\n\n # Blacklist.\n parser.add_argument(\n \"--miner.blacklist.blacklist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Blacklist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.whitelist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Whitelist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.force_validator_permit\",\n action=\"store_true\",\n help=\"Only allow requests from validators\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.allow_non_registered\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.minimum_stake_requirement\",\n type=float,\n help=\"Minimum stake requirement\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.blacklist.min_request_period\",\n type=int,\n help=\"Time period (in minutes) to serve a maximum of 50 requests for each hotkey\",\n default=5,\n )\n\n # Priority.\n parser.add_argument(\n \"--miner.priority.default\",\n type=float,\n help=\"Default priority of non-registered requests\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.priority.time_stake_multiplicate\",\n type=int,\n help=\"Time (in minutes) it takes to make the stake twice as important in the priority queue\",\n default=10,\n )\n parser.add_argument(\n \"--miner.priority.len_request_timestamps\",\n type=int,\n help=\"Number of historic request timestamps to record\",\n default=50,\n )\n # Switches.\n parser.add_argument(\n \"--miner.no_set_weights\",\n action=\"store_true\",\n help=\"If True, the miner does not set weights.\",\n default=False,\n )\n parser.add_argument(\n 
\"--miner.no_serve\",\n action=\"store_true\",\n help=\"If True, the miner doesnt serve the axon.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.no_start_axon\",\n action=\"store_true\",\n help=\"If True, the miner doesnt start the axon.\",\n default=False,\n )\n\n # Mocks.\n parser.add_argument(\n \"--miner.mock_subtensor\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n\n # Wandb args\n parser.add_argument(\n \"--wandb.off\", action=\"store_true\", help=\"Turn off wandb.\", default=False\n )\n parser.add_argument(\n \"--wandb.project_name\",\n type=str,\n help=\"The name of the project where you are sending the new run.\",\n default=\"philanthropic-thunder\",\n )\n parser.add_argument(\n \"--wandb.entity\",\n type=str,\n help=\"An entity is a username or team name where youre sending runs.\",\n default=\"philanthrope\",\n )\n parser.add_argument(\n \"--wandb.offline\",\n action=\"store_true\",\n help=\"Runs wandb in offline mode.\",\n default=False,\n )\n parser.add_argument(\n \"--wandb.weights_step_length\",\n type=int,\n help=\"How many steps before we log the weights.\",\n default=10,\n )\n parser.add_argument(\n \"--wandb.run_step_length\",\n type=int,\n help=\"How many steps before we rollover to a new run.\",\n default=1500,\n )\n parser.add_argument(\n \"--wandb.notes\",\n type=str,\n help=\"Notes to add to the wandb run.\",\n default=\"\",\n )" }, { "identifier": "store_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def store_chunk_metadata(r, chunk_hash, filepath, hotkey, size, seed):\n \"\"\"\n Stores the metadata of a chunk in a Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): Miner hotkey associated with the chunk.\n size (int): The size of the chunk.\n seed (str): The seed associated with the chunk.\n\n This function stores the filepath, size (as a string), and seed for the given chunk hash.\n \"\"\"\n # Ensure that all data are in the correct format\n metadata = {\n \"filepath\": filepath,\n \"hotkey\": hotkey,\n \"size\": str(size), # Convert size to string\n \"seed\": seed, # Store seed directly\n }\n\n # Use hmset (or hset which is its modern equivalent) to store the hash\n for key, value in metadata.items():\n await r.hset(chunk_hash, key, value)" }, { "identifier": "update_seed_info", "path": "storage/miner/database.py", "snippet": "async def update_seed_info(r, chunk_hash, hotkey, seed):\n \"\"\"\n Updates the seed information for a specific chunk in the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): The caller hotkey value to be updated.\n seed (str): The new seed value to be updated.\n\n This function updates the seed information for the specified chunk hash.\n \"\"\"\n # Update the existing seed information\n await r.hset(chunk_hash, \"seed\", seed)\n await r.hset(chunk_hash, \"hotkey\", hotkey)" }, { "identifier": "get_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def get_chunk_metadata(r, chunk_hash):\n \"\"\"\n Retrieves the metadata for a specific chunk from the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n\n Returns:\n dict: A dictionary containing the chunk's metadata, including filepath, size, and seed.\n Size is converted to an 
integer, and seed is decoded from bytes to a string.\n \"\"\"\n metadata = await r.hgetall(chunk_hash)\n if metadata:\n metadata[b\"size\"] = int(metadata[b\"size\"])\n metadata[b\"seed\"] = metadata[b\"seed\"].decode(\"utf-8\")\n return metadata" } ]
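For illustration, a minimal sketch of how the metadata helpers in the context above round-trip through Redis. It assumes a local Redis on the default port; the chunk hash, path, and hotkey values are made-up stand-ins:

import asyncio

import aioredis

from storage.miner.database import store_chunk_metadata, get_chunk_metadata

async def main():
    r = aioredis.StrictRedis(host="localhost", port=6379, db=0)
    # Write the per-chunk hash fields (filepath, hotkey, size, seed)
    await store_chunk_metadata(
        r,
        chunk_hash="deadbeef",        # hypothetical chunk hash
        filepath="~/.data/deadbeef",  # hypothetical on-disk location
        hotkey="5F...",               # hypothetical caller hotkey
        size=1024,
        seed="42",
    )
    # Read them back; per the snippet, size returns as an int and seed as a utf-8 string
    metadata = await get_chunk_metadata(r, "deadbeef")
    print(metadata[b"size"], metadata[b"seed"])

asyncio.run(main())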
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
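For illustration, a minimal sketch of the chunk-commitment flow that commit_data_with_seed (first context snippet above) implements, using the same imports. It assumes setup_CRS() yields the two group elements the committer is built from; the chunks and seed are made up:

from storage.shared.ecc import setup_CRS, ECCommitment, ecc_point_to_hex
from storage.shared.merkle import MerkleTree

g, h = setup_CRS()  # assumed to return the commitment base points
committer = ECCommitment(g, h)
seed = 42
data_chunks = [b"chunk-0", b"chunk-1"]

merkle_tree = MerkleTree()
for chunk in data_chunks:
    # per the snippet, commit() returns a curve point c, a value m_val, and randomness r
    c, m_val, r = committer.commit(chunk + str(seed).encode())
    merkle_tree.add_leaf(ecc_point_to_hex(c))  # leaves are hex-encoded points
merkle_tree.make_tree()  # the root now binds all chunk commitments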
14974
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: {priority}" ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment(
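For illustration, a condensed, self-contained sketch of the store() flow documented above, with a plain hash standing in for the elliptic-curve commitment so it runs on its own; the paths and inputs are made up:

import base64
import hashlib
import os

def store_sketch(encrypted_b64: str, directory: str, seed: str) -> str:
    data = base64.b64decode(encrypted_b64)  # 1. decode the payload
    # 2. commit to data + seed (hash stand-in for the ECC commitment)
    commitment = hashlib.sha256(data + seed.encode()).hexdigest()
    # 3. store the raw bytes under a hash of the data
    data_hash = hashlib.sha256(data).hexdigest()
    directory = os.path.expanduser(directory)
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, data_hash), "wb") as f:
        f.write(data)
    # 4./5. the real miner also records metadata in Redis and signs the result
    return commitment

print(store_sketch(base64.b64encode(b"hello").decode(), "~/.data", "42"))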
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach the functions which are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port or external IP has changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. priority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: {priority}" ) return priority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()") data_hash = hash_data(encrypted_byte_data) # If already storing this hash, simply update the validator seeds and return challenge bt.logging.trace(f"checking if data already exists...") if await self.database.exists(data_hash): # update the validator seed challenge hash in storage await update_seed_info( self.database, data_hash, synapse.dendrite.hotkey, synapse.seed ) else: # Store the data in the filesystem filepath = save_data_to_filesystem( encrypted_byte_data, self.config.database.directory, str(data_hash) ) bt.logging.trace(f"stored data {data_hash} in filepath: {filepath}") # Add the initial chunk, size, and validator seed information await store_chunk_metadata( self.database, data_hash, filepath, synapse.dendrite.hotkey, sys.getsizeof(encrypted_byte_data), synapse.seed, ) # Commit to the entire data block bt.logging.trace(f"entering ECCommitment()") committer = ECCommitment(
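For illustration, a tiny self-contained sketch of the stake-based ranking used by the *_priority_fn handlers above; the hotkeys and stake values are made-up stand-ins for metagraph.hotkeys and metagraph.S:

hotkeys = ["hk-a", "hk-b", "hk-c"]  # stand-in for self.metagraph.hotkeys
stakes = [10.0, 250.0, 3.5]         # stand-in for self.metagraph.S

def priority_for(caller_hotkey: str) -> float:
    caller_uid = hotkeys.index(caller_hotkey)  # the caller's UID in the metagraph
    return float(stakes[caller_uid])           # the stake doubles as the priority

assert priority_for("hk-b") > priority_for("hk-a")  # higher stake is served first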
hex_to_ecc_point(synapse.g, synapse.curve),
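For context, next_line above is the first argument of the ECCommitment constructor that both code fields break off at. A sketch of what the completed call plausibly looks like, with stand-in classes so it runs on its own; the second argument (synapse.h) is an assumption based on the commitment group elements the Store synapse carries:

class FakeSynapse:
    g, h, curve = "02ab...", "03cd...", "P-256"  # hypothetical hex points

def hex_to_ecc_point(hex_str, curve):  # stand-in for the real helper
    return (hex_str, curve)

class ECCommitment:  # stand-in for the real class
    def __init__(self, g, h):
        self.g, self.h = g, h

synapse = FakeSynapse()
committer = ECCommitment(
    hex_to_ecc_point(synapse.g, synapse.curve),  # = the record's next_line
    hex_to_ecc_point(synapse.h, synapse.curve),  # assumed second base point
)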
4
2023-10-26 18:54:47+00:00
24k
cpacker/MemGPT
memgpt/main.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "CLIInterface", "path": "memgpt/interface.py", "snippet": "class CLIInterface(AgentInterface):\r\n \"\"\"Basic interface for dumping agent events to the command-line\"\"\"\r\n\r\n @staticmethod\r\n def important_message(msg):\r\n fstr = f\"{Fore.MAGENTA}{Style.BRIGHT}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def warning_message(msg):\r\n fstr = f\"{Fore.RED}{Style.BRIGHT}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n else:\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def internal_monologue(msg):\r\n # ANSI escape code for italic is '\\x1B[3m'\r\n fstr = f\"\\x1B[3m{Fore.LIGHTBLACK_EX}💭 {{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def assistant_message(msg):\r\n fstr = f\"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def memory_message(msg):\r\n fstr = f\"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{{msg}}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def system_message(msg):\r\n fstr = f\"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{msg}{Style.RESET_ALL}\"\r\n if STRIP_UI:\r\n fstr = \"{msg}\"\r\n print(fstr.format(msg=msg))\r\n\r\n @staticmethod\r\n def user_message(msg, raw=False, dump=False, debug=DEBUG):\r\n def print_user_message(icon, msg, printf=print):\r\n if STRIP_UI:\r\n printf(f\"{icon} {msg}\")\r\n else:\r\n printf(f\"{Fore.GREEN}{Style.BRIGHT}{icon} {Fore.GREEN}{msg}{Style.RESET_ALL}\")\r\n\r\n def printd_user_message(icon, msg):\r\n return print_user_message(icon, msg)\r\n\r\n if not (raw or dump or debug):\r\n # we do not want to repeat the message in normal use\r\n return\r\n\r\n if isinstance(msg, str):\r\n if raw:\r\n printd_user_message(\"🧑\", msg)\r\n return\r\n else:\r\n try:\r\n msg_json = json.loads(msg)\r\n except:\r\n printd(f\"{CLI_WARNING_PREFIX}failed to parse user message into json\")\r\n printd_user_message(\"🧑\", msg)\r\n return\r\n if msg_json[\"type\"] == \"user_message\":\r\n if dump:\r\n print_user_message(\"🧑\", msg_json[\"message\"])\r\n return\r\n msg_json.pop(\"type\")\r\n printd_user_message(\"🧑\", msg_json)\r\n elif msg_json[\"type\"] == \"heartbeat\":\r\n if debug:\r\n msg_json.pop(\"type\")\r\n printd_user_message(\"💓\", msg_json)\r\n elif dump:\r\n print_user_message(\"💓\", msg_json)\r\n return\r\n\r\n elif msg_json[\"type\"] == \"system_message\":\r\n msg_json.pop(\"type\")\r\n printd_user_message(\"🖥️\", msg_json)\r\n else:\r\n printd_user_message(\"🧑\", msg_json)\r\n\r\n @staticmethod\r\n def function_message(msg, debug=DEBUG):\r\n def print_function_message(icon, msg, color=Fore.RED, printf=print):\r\n if STRIP_UI:\r\n printf(f\"⚡{icon} [function] {msg}\")\r\n else:\r\n printf(f\"{color}{Style.BRIGHT}⚡{icon} [function] {color}{msg}{Style.RESET_ALL}\")\r\n\r\n def printd_function_message(icon, msg, color=Fore.RED):\r\n return print_function_message(icon, msg, color, printf=(print if debug else printd))\r\n\r\n if isinstance(msg, dict):\r\n printd_function_message(\"\", msg)\r\n return\r\n\r\n if msg.startswith(\"Success\"):\r\n printd_function_message(\"🟢\", msg)\r\n elif msg.startswith(\"Error: \"):\r\n printd_function_message(\"🔴\", msg)\r\n elif 
msg.startswith(\"Running \"):\r\n if debug:\r\n printd_function_message(\"\", msg)\r\n else:\r\n match = re.search(r\"Running (\\w+)\\((.*)\\)\", msg)\r\n if match:\r\n function_name = match.group(1)\r\n function_args = match.group(2)\r\n if function_name in [\"archival_memory_insert\", \"archival_memory_search\", \"core_memory_replace\", \"core_memory_append\"]:\r\n if function_name in [\"archival_memory_insert\", \"core_memory_append\", \"core_memory_replace\"]:\r\n print_function_message(\"🧠\", f\"updating memory with {function_name}\")\r\n elif function_name == \"archival_memory_search\":\r\n print_function_message(\"🧠\", f\"searching memory with {function_name}\")\r\n try:\r\n msg_dict = eval(function_args)\r\n if function_name == \"archival_memory_search\":\r\n output = f'\\tquery: {msg_dict[\"query\"]}, page: {msg_dict[\"page\"]}'\r\n if STRIP_UI:\r\n print(output)\r\n else:\r\n print(f\"{Fore.RED}{output}{Style.RESET_ALL}\")\r\n elif function_name == \"archival_memory_insert\":\r\n output = f'\\t→ {msg_dict[\"content\"]}'\r\n if STRIP_UI:\r\n print(output)\r\n else:\r\n print(f\"{Style.BRIGHT}{Fore.RED}{output}{Style.RESET_ALL}\")\r\n else:\r\n if STRIP_UI:\r\n print(f'\\t {msg_dict[\"old_content\"]}\\n\\t→ {msg_dict[\"new_content\"]}')\r\n else:\r\n print(\r\n f'{Style.BRIGHT}\\t{Fore.RED} {msg_dict[\"old_content\"]}\\n\\t{Fore.GREEN}→ {msg_dict[\"new_content\"]}{Style.RESET_ALL}'\r\n )\r\n except Exception as e:\r\n printd(str(e))\r\n printd(msg_dict)\r\n pass\r\n elif function_name in [\"conversation_search\", \"conversation_search_date\"]:\r\n print_function_message(\"🧠\", f\"searching memory with {function_name}\")\r\n try:\r\n msg_dict = eval(function_args)\r\n output = f'\\tquery: {msg_dict[\"query\"]}, page: {msg_dict[\"page\"]}'\r\n if STRIP_UI:\r\n print(output)\r\n else:\r\n print(f\"{Fore.RED}{output}{Style.RESET_ALL}\")\r\n except Exception as e:\r\n printd(str(e))\r\n printd(msg_dict)\r\n pass\r\n else:\r\n printd(f\"{CLI_WARNING_PREFIX}did not recognize function message\")\r\n printd_function_message(\"\", msg)\r\n else:\r\n try:\r\n msg_dict = json.loads(msg)\r\n if \"status\" in msg_dict and msg_dict[\"status\"] == \"OK\":\r\n printd_function_message(\"\", str(msg), color=Fore.GREEN)\r\n else:\r\n printd_function_message(\"\", str(msg), color=Fore.RED)\r\n except Exception:\r\n print(f\"{CLI_WARNING_PREFIX}did not recognize function message {type(msg)} {msg}\")\r\n printd_function_message(\"\", msg)\r\n\r\n @staticmethod\r\n def print_messages(message_sequence, dump=False):\r\n idx = len(message_sequence)\r\n for msg in message_sequence:\r\n if dump:\r\n print(f\"[{idx}] \", end=\"\")\r\n idx -= 1\r\n role = msg[\"role\"]\r\n content = msg[\"content\"]\r\n\r\n if role == \"system\":\r\n CLIInterface.system_message(content)\r\n elif role == \"assistant\":\r\n # Differentiate between internal monologue, function calls, and messages\r\n if msg.get(\"function_call\"):\r\n if content is not None:\r\n CLIInterface.internal_monologue(content)\r\n # I think the next one is not up to date\r\n # function_message(msg[\"function_call\"])\r\n args = json.loads(msg[\"function_call\"].get(\"arguments\"))\r\n CLIInterface.assistant_message(args.get(\"message\"))\r\n # assistant_message(content)\r\n else:\r\n CLIInterface.internal_monologue(content)\r\n elif role == \"user\":\r\n CLIInterface.user_message(content, dump=dump)\r\n elif role == \"function\":\r\n CLIInterface.function_message(content, debug=dump)\r\n else:\r\n print(f\"Unknown role: {content}\")\r\n\r\n 
@staticmethod\r\n def print_messages_simple(message_sequence):\r\n for msg in message_sequence:\r\n role = msg[\"role\"]\r\n content = msg[\"content\"]\r\n\r\n if role == \"system\":\r\n CLIInterface.system_message(content)\r\n elif role == \"assistant\":\r\n CLIInterface.assistant_message(content)\r\n elif role == \"user\":\r\n CLIInterface.user_message(content, raw=True)\r\n else:\r\n print(f\"Unknown role: {content}\")\r\n\r\n @staticmethod\r\n def print_messages_raw(message_sequence):\r\n for msg in message_sequence:\r\n print(msg)\r\n\r\n @staticmethod\r\n def step_yield():\r\n pass\r" }, { "identifier": "MemGPTConfig", "path": "memgpt/config.py", "snippet": "class MemGPTConfig:\n config_path: str = os.path.join(MEMGPT_DIR, \"config\")\n anon_clientid: str = None\n\n # preset\n preset: str = DEFAULT_PRESET\n\n # persona parameters\n persona: str = DEFAULT_PERSONA\n human: str = DEFAULT_HUMAN\n agent: str = None\n\n # model parameters\n default_llm_config: LLMConfig = field(default_factory=LLMConfig)\n\n # embedding parameters\n default_embedding_config: EmbeddingConfig = field(default_factory=EmbeddingConfig)\n\n # database configs: archival\n archival_storage_type: str = \"chroma\" # local, db\n archival_storage_path: str = os.path.join(MEMGPT_DIR, \"chroma\")\n archival_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: recall\n recall_storage_type: str = \"sqlite\" # local, db\n recall_storage_path: str = MEMGPT_DIR\n recall_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: metadata storage (sources, agents, data sources)\n metadata_storage_type: str = \"sqlite\"\n metadata_storage_path: str = MEMGPT_DIR\n metadata_storage_uri: str = None\n\n # database configs: agent state\n persistence_manager_type: str = None # in-memory, db\n persistence_manager_save_file: str = None # local file\n persistence_manager_uri: str = None # db URI\n\n # version (for backcompat)\n memgpt_version: str = None\n\n # user info\n policies_accepted: bool = False\n\n def __post_init__(self):\n # ensure types\n # self.embedding_chunk_size = int(self.embedding_chunk_size)\n # self.embedding_dim = int(self.embedding_dim)\n # self.context_window = int(self.context_window)\n pass\n\n @staticmethod\n def generate_uuid() -> str:\n return uuid.UUID(int=uuid.getnode()).hex\n\n @classmethod\n def load(cls) -> \"MemGPTConfig\":\n # avoid circular import\n from memgpt.migrate import config_is_compatible, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n error_message = \" \".join(\n [\n f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}.\",\n f\"\\nTo use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}) or regenerate your config using `memgpt configure`, or `memgpt migrate` if you would like to migrate old agents.\",\n ]\n )\n raise ValueError(error_message)\n\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n # insure all configuration directories exist\n cls.create_config_dir()\n if os.path.exists(config_path):\n # read existing config\n config.read(config_path)\n\n # Handle extraction of nested LLMConfig and EmbeddingConfig\n llm_config_dict = {\n # Extract relevant LLM configuration from the config file\n \"model\": get_field(config, \"model\", \"model\"),\n 
\"model_endpoint\": get_field(config, \"model\", \"model_endpoint\"),\n \"model_endpoint_type\": get_field(config, \"model\", \"model_endpoint_type\"),\n \"model_wrapper\": get_field(config, \"model\", \"model_wrapper\"),\n \"context_window\": get_field(config, \"model\", \"context_window\"),\n }\n embedding_config_dict = {\n # Extract relevant Embedding configuration from the config file\n \"embedding_endpoint\": get_field(config, \"embedding\", \"embedding_endpoint\"),\n \"embedding_model\": get_field(config, \"embedding\", \"embedding_model\"),\n \"embedding_endpoint_type\": get_field(config, \"embedding\", \"embedding_endpoint_type\"),\n \"embedding_dim\": get_field(config, \"embedding\", \"embedding_dim\"),\n \"embedding_chunk_size\": get_field(config, \"embedding\", \"chunk_size\"),\n }\n # Correct the types that aren't strings\n if llm_config_dict[\"context_window\"] is not None:\n llm_config_dict[\"context_window\"] = int(llm_config_dict[\"context_window\"])\n if embedding_config_dict[\"embedding_dim\"] is not None:\n embedding_config_dict[\"embedding_dim\"] = int(embedding_config_dict[\"embedding_dim\"])\n if embedding_config_dict[\"embedding_chunk_size\"] is not None:\n embedding_config_dict[\"embedding_chunk_size\"] = int(embedding_config_dict[\"embedding_chunk_size\"])\n # Construct the inner properties\n llm_config = LLMConfig(**llm_config_dict)\n embedding_config = EmbeddingConfig(**embedding_config_dict)\n\n # Everything else\n config_dict = {\n # Two prepared configs\n \"default_llm_config\": llm_config,\n \"default_embedding_config\": embedding_config,\n # Agent related\n \"preset\": get_field(config, \"defaults\", \"preset\"),\n \"persona\": get_field(config, \"defaults\", \"persona\"),\n \"human\": get_field(config, \"defaults\", \"human\"),\n \"agent\": get_field(config, \"defaults\", \"agent\"),\n # Storage related\n \"archival_storage_type\": get_field(config, \"archival_storage\", \"type\"),\n \"archival_storage_path\": get_field(config, \"archival_storage\", \"path\"),\n \"archival_storage_uri\": get_field(config, \"archival_storage\", \"uri\"),\n \"recall_storage_type\": get_field(config, \"recall_storage\", \"type\"),\n \"recall_storage_path\": get_field(config, \"recall_storage\", \"path\"),\n \"recall_storage_uri\": get_field(config, \"recall_storage\", \"uri\"),\n \"metadata_storage_type\": get_field(config, \"metadata_storage\", \"type\"),\n \"metadata_storage_path\": get_field(config, \"metadata_storage\", \"path\"),\n \"metadata_storage_uri\": get_field(config, \"metadata_storage\", \"uri\"),\n # Misc\n \"anon_clientid\": get_field(config, \"client\", \"anon_clientid\"),\n \"config_path\": config_path,\n \"memgpt_version\": get_field(config, \"version\", \"memgpt_version\"),\n }\n\n # Don't include null values\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n\n return cls(**config_dict)\n\n # create new config\n anon_clientid = MemGPTConfig.generate_uuid()\n config = cls(anon_clientid=anon_clientid, config_path=config_path)\n config.create_config_dir() # create dirs\n config.save() # save updated config\n\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n\n # CLI defaults\n set_field(config, \"defaults\", \"preset\", self.preset)\n set_field(config, \"defaults\", \"persona\", self.persona)\n set_field(config, \"defaults\", \"human\", self.human)\n set_field(config, \"defaults\", \"agent\", self.agent)\n\n # model defaults\n set_field(config, \"model\", \"model\", 
self.default_llm_config.model)\n set_field(config, \"model\", \"model_endpoint\", self.default_llm_config.model_endpoint)\n set_field(config, \"model\", \"model_endpoint_type\", self.default_llm_config.model_endpoint_type)\n set_field(config, \"model\", \"model_wrapper\", self.default_llm_config.model_wrapper)\n set_field(config, \"model\", \"context_window\", str(self.default_llm_config.context_window))\n\n # embeddings\n set_field(config, \"embedding\", \"embedding_endpoint_type\", self.default_embedding_config.embedding_endpoint_type)\n set_field(config, \"embedding\", \"embedding_endpoint\", self.default_embedding_config.embedding_endpoint)\n set_field(config, \"embedding\", \"embedding_model\", self.default_embedding_config.embedding_model)\n set_field(config, \"embedding\", \"embedding_dim\", str(self.default_embedding_config.embedding_dim))\n set_field(config, \"embedding\", \"embedding_chunk_size\", str(self.default_embedding_config.embedding_chunk_size))\n\n # archival storage\n set_field(config, \"archival_storage\", \"type\", self.archival_storage_type)\n set_field(config, \"archival_storage\", \"path\", self.archival_storage_path)\n set_field(config, \"archival_storage\", \"uri\", self.archival_storage_uri)\n\n # recall storage\n set_field(config, \"recall_storage\", \"type\", self.recall_storage_type)\n set_field(config, \"recall_storage\", \"path\", self.recall_storage_path)\n set_field(config, \"recall_storage\", \"uri\", self.recall_storage_uri)\n\n # metadata storage\n set_field(config, \"metadata_storage\", \"type\", self.metadata_storage_type)\n set_field(config, \"metadata_storage\", \"path\", self.metadata_storage_path)\n set_field(config, \"metadata_storage\", \"uri\", self.metadata_storage_uri)\n\n # set version\n set_field(config, \"version\", \"memgpt_version\", memgpt.__version__)\n\n # client\n if not self.anon_clientid:\n self.anon_clientid = self.generate_uuid()\n set_field(config, \"client\", \"anon_clientid\", self.anon_clientid)\n\n # always make sure all directories are present\n self.create_config_dir()\n\n with open(self.config_path, \"w\") as f:\n config.write(f)\n logger.debug(f\"Saved Config: {self.config_path}\")\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n assert not os.path.isdir(config_path), f\"Config path {config_path} cannot be set to a directory.\"\n return os.path.exists(config_path)\n\n @staticmethod\n def create_config_dir():\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n\n folders = [\"personas\", \"humans\", \"archival\", \"agents\", \"functions\", \"system_prompts\", \"presets\", \"settings\"]\n\n for folder in folders:\n if not os.path.exists(os.path.join(MEMGPT_DIR, folder)):\n os.makedirs(os.path.join(MEMGPT_DIR, folder))" }, { "identifier": "run", "path": "memgpt/cli/cli.py", "snippet": "def run(\n persona: str = typer.Option(None, help=\"Specify persona\"),\n agent: str = typer.Option(None, help=\"Specify agent save file\"),\n human: str = typer.Option(None, help=\"Specify human\"),\n preset: str = typer.Option(None, help=\"Specify preset\"),\n # model flags\n model: str = typer.Option(None, help=\"Specify the LLM model\"),\n model_wrapper: str = typer.Option(None, help=\"Specify the LLM model wrapper\"),\n model_endpoint: str = typer.Option(None, help=\"Specify the LLM model endpoint\"),\n model_endpoint_type: str = 
typer.Option(None, help=\"Specify the LLM model endpoint type\"),\n context_window: int = typer.Option(None, help=\"The context window of the LLM you are using (e.g. 8k for most Mistral 7B variants)\"),\n # other\n first: bool = typer.Option(False, \"--first\", help=\"Use --first to send the first message in the sequence\"),\n strip_ui: bool = typer.Option(False, help=\"Remove all the bells and whistles in CLI output (helpful for testing)\"),\n debug: bool = typer.Option(False, \"--debug\", help=\"Use --debug to enable debugging output\"),\n no_verify: bool = typer.Option(False, help=\"Bypass message verification\"),\n yes: bool = typer.Option(False, \"-y\", help=\"Skip confirmation prompt and use defaults\"),\n):\n \"\"\"Start chatting with an MemGPT agent\n\n Example usage: `memgpt run --agent myagent --data-source mydata --persona mypersona --human myhuman --model gpt-3.5-turbo`\n\n :param persona: Specify persona\n :param agent: Specify agent name (will load existing state if the agent exists, or create a new one with that name)\n :param human: Specify human\n :param model: Specify the LLM model\n\n \"\"\"\n\n # setup logger\n # TODO: remove Utils Debug after global logging is complete.\n utils.DEBUG = debug\n # TODO: add logging command line options for runtime log level\n\n if debug:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.CRITICAL)\n\n from memgpt.migrate import config_is_compatible, wipe_config_and_reconfigure, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n typer.secho(f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}\\n\", fg=typer.colors.RED)\n choices = [\n \"Run the full config setup (recommended)\",\n \"Create a new config using defaults\",\n \"Cancel\",\n ]\n selection = questionary.select(\n f\"To use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}), or regenerate your config. 
Would you like to proceed?\",\n choices=choices,\n default=choices[0],\n ).ask()\n if selection == choices[0]:\n try:\n wipe_config_and_reconfigure()\n except Exception as e:\n typer.secho(f\"Fresh config generation failed - error:\\n{e}\", fg=typer.colors.RED)\n raise\n elif selection == choices[1]:\n try:\n wipe_config_and_reconfigure(run_configure=False)\n except Exception as e:\n typer.secho(f\"Fresh config generation failed - error:\\n{e}\", fg=typer.colors.RED)\n raise\n else:\n typer.secho(\"Migration cancelled (to migrate old agents, run `memgpt migrate`)\", fg=typer.colors.RED)\n raise KeyboardInterrupt()\n\n if not MemGPTConfig.exists():\n # if no config, ask about quickstart\n # do you want to do:\n # - openai (run quickstart)\n # - memgpt hosted (run quickstart)\n # - other (run configure)\n if yes:\n # if user is passing '-y' to bypass all inputs, use memgpt hosted\n # since it can't fail out if you don't have an API key\n quickstart(backend=QuickstartChoice.memgpt_hosted)\n config = MemGPTConfig()\n\n else:\n config_choices = {\n \"memgpt\": \"Use the free MemGPT endpoints\",\n \"openai\": \"Use OpenAI (requires an OpenAI API key)\",\n \"other\": \"Other (OpenAI Azure, custom LLM endpoint, etc)\",\n }\n print()\n config_selection = questionary.select(\n \"How would you like to set up MemGPT?\",\n choices=list(config_choices.values()),\n default=config_choices[\"memgpt\"],\n ).ask()\n\n if config_selection == config_choices[\"memgpt\"]:\n print()\n quickstart(backend=QuickstartChoice.memgpt_hosted, debug=debug, terminal=False, latest=False)\n elif config_selection == config_choices[\"openai\"]:\n print()\n quickstart(backend=QuickstartChoice.openai, debug=debug, terminal=False, latest=False)\n elif config_selection == config_choices[\"other\"]:\n configure()\n else:\n raise ValueError(config_selection)\n\n config = MemGPTConfig.load()\n\n else: # load config\n config = MemGPTConfig.load()\n\n # force re-configuration is config is from old version\n if config.memgpt_version is None: # TODO: eventually add checks for older versions, if config changes again\n typer.secho(\"MemGPT has been updated to a newer version, so re-running configuration.\", fg=typer.colors.YELLOW)\n configure()\n config = MemGPTConfig.load()\n\n # read user id from config\n ms = MetadataStore(config)\n user_id = uuid.UUID(config.anon_clientid)\n user = ms.get_user(user_id=user_id)\n if user is None:\n ms.create_user(User(id=user_id))\n user = ms.get_user(user_id=user_id)\n if user is None:\n typer.secho(f\"Failed to create default user in database.\", fg=typer.colors.RED)\n sys.exit(1)\n\n # override with command line arguments\n if debug:\n config.debug = debug\n if no_verify:\n config.no_verify = no_verify\n # determine agent to use, if not provided\n if not yes and not agent:\n agents = ms.list_agents(user_id=user.id)\n agents = [a.name for a in agents]\n\n if len(agents) > 0 and not any([persona, human, model]):\n print()\n select_agent = questionary.confirm(\"Would you like to select an existing agent?\").ask()\n if select_agent is None:\n raise KeyboardInterrupt\n if select_agent:\n agent = questionary.select(\"Select agent:\", choices=agents).ask()\n\n # create agent config\n if agent and ms.get_agent(agent_name=agent, user_id=user.id): # use existing agent\n typer.secho(f\"\\n🔁 Using existing agent {agent}\", fg=typer.colors.GREEN)\n # agent_config = AgentConfig.load(agent)\n agent_state = ms.get_agent(agent_name=agent, user_id=user_id)\n printd(\"Loading agent state:\", agent_state.id)\n 
printd(\"Agent state:\", agent_state.state)\n # printd(\"State path:\", agent_config.save_state_dir())\n # printd(\"Persistent manager path:\", agent_config.save_persistence_manager_dir())\n # printd(\"Index path:\", agent_config.save_agent_index_dir())\n # persistence_manager = LocalStateManager(agent_config).load() # TODO: implement load\n # TODO: load prior agent state\n if persona and persona != agent_state.persona:\n typer.secho(f\"{CLI_WARNING_PREFIX}Overriding existing persona {agent_state.persona} with {persona}\", fg=typer.colors.YELLOW)\n agent_state.persona = persona\n # raise ValueError(f\"Cannot override {agent_state.name} existing persona {agent_state.persona} with {persona}\")\n if human and human != agent_state.human:\n typer.secho(f\"{CLI_WARNING_PREFIX}Overriding existing human {agent_state.human} with {human}\", fg=typer.colors.YELLOW)\n agent_state.human = human\n # raise ValueError(f\"Cannot override {agent_config.name} existing human {agent_config.human} with {human}\")\n\n # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)\n if model and model != agent_state.llm_config.model:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model {agent_state.llm_config.model} with {model}\", fg=typer.colors.YELLOW\n )\n agent_state.llm_config.model = model\n if context_window is not None and int(context_window) != agent_state.llm_config.context_window:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing context window {agent_state.llm_config.context_window} with {context_window}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.context_window = context_window\n if model_wrapper and model_wrapper != agent_state.llm_config.model_wrapper:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model wrapper {agent_state.llm_config.model_wrapper} with {model_wrapper}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.model_wrapper = model_wrapper\n if model_endpoint and model_endpoint != agent_state.llm_config.model_endpoint:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint {agent_state.llm_config.model_endpoint} with {model_endpoint}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.model_endpoint = model_endpoint\n if model_endpoint_type and model_endpoint_type != agent_state.llm_config.model_endpoint_type:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {agent_state.llm_config.model_endpoint_type} with {model_endpoint_type}\",\n fg=typer.colors.YELLOW,\n )\n agent_state.llm_config.model_endpoint_type = model_endpoint_type\n\n # Update the agent with any overrides\n ms.update_agent(agent_state)\n\n # create agent\n memgpt_agent = Agent(agent_state, interface=interface)\n\n else: # create new agent\n # create new agent config: override defaults with args if provided\n typer.secho(\"\\n🧬 Creating new agent...\", fg=typer.colors.WHITE)\n\n if agent is None:\n # determine agent name\n # agent_count = len(ms.list_agents(user_id=user.id))\n # agent = f\"agent_{agent_count}\"\n agent = utils.create_random_username()\n\n llm_config = config.default_llm_config\n embedding_config = config.default_embedding_config # TODO allow overriding embedding params via CLI run\n\n # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)\n if model and model != llm_config.model:\n typer.secho(f\"{CLI_WARNING_PREFIX}Overriding default model {llm_config.model} with {model}\", fg=typer.colors.YELLOW)\n 
llm_config.model = model\n if context_window is not None and int(context_window) != llm_config.context_window:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding default context window {llm_config.context_window} with {context_window}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.context_window = context_window\n if model_wrapper and model_wrapper != llm_config.model_wrapper:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model wrapper {llm_config.model_wrapper} with {model_wrapper}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.model_wrapper = model_wrapper\n if model_endpoint and model_endpoint != llm_config.model_endpoint:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint {llm_config.model_endpoint} with {model_endpoint}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.model_endpoint = model_endpoint\n if model_endpoint_type and model_endpoint_type != llm_config.model_endpoint_type:\n typer.secho(\n f\"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {llm_config.model_endpoint_type} with {model_endpoint_type}\",\n fg=typer.colors.YELLOW,\n )\n llm_config.model_endpoint_type = model_endpoint_type\n\n agent_state = AgentState(\n name=agent,\n user_id=user.id,\n persona=persona if persona else user.default_persona,\n human=human if human else user.default_human,\n preset=preset if preset else user.default_preset,\n llm_config=llm_config,\n embedding_config=embedding_config,\n )\n ms.create_agent(agent_state)\n\n typer.secho(f\"-> 🤖 Using persona profile '{agent_state.persona}'\", fg=typer.colors.WHITE)\n typer.secho(f\"-> 🧑 Using human profile '{agent_state.human}'\", fg=typer.colors.WHITE)\n\n # Supress llama-index noise\n # TODO(swooders) add persistence manager code? or comment out?\n # with suppress_stdout():\n # TODO: allow configrable state manager (only local is supported right now)\n # persistence_manager = LocalStateManager(agent_config) # TODO: insert dataset/pre-fill\n\n # create agent\n try:\n memgpt_agent = presets.create_agent_from_preset(\n agent_state=agent_state,\n interface=interface,\n )\n except ValueError as e:\n # TODO(swooders) what's the equivalent cleanup code for the new DB refactor?\n typer.secho(f\"Failed to create agent from provided information:\\n{e}\", fg=typer.colors.RED)\n # # Delete the directory of the failed agent\n # try:\n # # Path to the specific file\n # agent_config_file = agent_config.agent_config_path\n\n # # Check if the file exists\n # if os.path.isfile(agent_config_file):\n # # Delete the file\n # os.remove(agent_config_file)\n\n # # Now, delete the directory along with any remaining files in it\n # agent_save_dir = os.path.join(MEMGPT_DIR, \"agents\", agent_config.name)\n # shutil.rmtree(agent_save_dir)\n # except:\n # typer.secho(f\"Failed to delete agent directory during cleanup:\\n{e}\", fg=typer.colors.RED)\n sys.exit(1)\n typer.secho(f\"🎉 Created new agent '{agent_state.name}'\", fg=typer.colors.GREEN)\n\n # pretty print agent config\n # printd(json.dumps(vars(agent_config), indent=4, sort_keys=True, ensure_ascii=JSON_ENSURE_ASCII))\n # printd(json.dumps(agent_init_state), indent=4, sort_keys=True, ensure_ascii=JSON_ENSURE_ASCII))\n\n # configure llama index\n original_stdout = sys.stdout # unfortunate hack required to suppress confusing print statements from llama index\n sys.stdout = io.StringIO()\n embed_model = embedding_model(config=agent_state.embedding_config, user_id=user.id)\n service_context = ServiceContext.from_defaults(\n llm=None, embed_model=embed_model, 
chunk_size=agent_state.embedding_config.embedding_chunk_size\n )\n set_global_service_context(service_context)\n sys.stdout = original_stdout\n\n # start event loop\n from memgpt.main import run_agent_loop\n\n print() # extra space\n run_agent_loop(memgpt_agent, config, first, ms, no_verify) # TODO: add back no_verify" }, { "identifier": "attach", "path": "memgpt/cli/cli.py", "snippet": "def attach(\n agent: str = typer.Option(help=\"Specify agent to attach data to\"),\n data_source: str = typer.Option(help=\"Data source to attach to avent\"),\n user_id: uuid.UUID = None,\n):\n # use client ID is no user_id provided\n config = MemGPTConfig.load()\n if user_id is None:\n user_id = uuid.UUID(config.anon_clientid)\n try:\n # loads the data contained in data source into the agent's memory\n from memgpt.agent_store.storage import StorageConnector, TableType\n from tqdm import tqdm\n\n ms = MetadataStore(config)\n agent = ms.get_agent(agent_name=agent, user_id=user_id)\n source = ms.get_source(source_name=data_source, user_id=user_id)\n assert source is not None, f\"Source {data_source} does not exist for user {user_id}\"\n\n # get storage connectors\n with suppress_stdout():\n source_storage = StorageConnector.get_storage_connector(TableType.PASSAGES, config, user_id=user_id)\n dest_storage = StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id=user_id, agent_id=agent.id)\n\n size = source_storage.size({\"data_source\": data_source})\n typer.secho(f\"Ingesting {size} passages into {agent.name}\", fg=typer.colors.GREEN)\n page_size = 100\n generator = source_storage.get_all_paginated(filters={\"data_source\": data_source}, page_size=page_size) # yields List[Passage]\n passages = []\n for i in tqdm(range(0, size, page_size)):\n passages = next(generator)\n print(\"inserting\", passages)\n\n # need to associated passage with agent (for filtering)\n for passage in passages:\n passage.agent_id = agent.id\n\n # insert into agent archival memory\n dest_storage.insert_many(passages)\n\n # save destination storage\n dest_storage.save()\n\n # attach to agent\n source_id = ms.get_source(source_name=data_source, user_id=user_id).id\n ms.attach_source(agent_id=agent.id, source_id=source_id, user_id=user_id)\n\n total_agent_passages = dest_storage.size()\n\n typer.secho(\n f\"Attached data source {data_source} to agent {agent}, consisting of {len(passages)}. 
Agent now has {total_agent_passages} embeddings in archival memory.\",\n fg=typer.colors.GREEN,\n )\n except KeyboardInterrupt:\n typer.secho(\"Operation interrupted by KeyboardInterrupt.\", fg=typer.colors.YELLOW)" }, { "identifier": "version", "path": "memgpt/cli/cli.py", "snippet": "def version():\n import memgpt\n\n print(memgpt.__version__)\n return memgpt.__version__" }, { "identifier": "server", "path": "memgpt/cli/cli.py", "snippet": "def server(\n type: ServerChoice = typer.Option(\"rest\", help=\"Server to run\"),\n port: int = typer.Option(None, help=\"Port to run the server on\"),\n host: str = typer.Option(None, help=\"Host to run the server on (default to localhost)\"),\n debug: bool = typer.Option(True, help=\"Turn debugging output on\"),\n):\n \"\"\"Launch a MemGPT server process\"\"\"\n\n if debug:\n from memgpt.server.server import logger as server_logger\n\n # Set the logging level\n server_logger.setLevel(logging.DEBUG)\n # Create a StreamHandler\n stream_handler = logging.StreamHandler()\n # Set the formatter (optional)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n stream_handler.setFormatter(formatter)\n # Add the handler to the logger\n server_logger.addHandler(stream_handler)\n\n if type == ServerChoice.rest_api:\n import uvicorn\n from memgpt.server.rest_api.server import app\n\n try:\n # Start the subprocess in a new session\n uvicorn.run(app, host=host or \"localhost\", port=port or REST_DEFAULT_PORT)\n\n except KeyboardInterrupt:\n # Handle CTRL-C\n print(\"Terminating the server...\")\n sys.exit(0)\n\n elif type == ServerChoice.ws_api:\n if port is None:\n port = WS_DEFAULT_PORT\n\n # Change to the desired directory\n script_path = Path(__file__).resolve()\n script_dir = script_path.parent\n\n server_directory = os.path.join(script_dir.parent, \"server\", \"ws_api\")\n command = f\"python server.py {port}\"\n\n # Run the command\n print(f\"Running WS (websockets) server: {command} (inside {server_directory})\")\n\n try:\n # Start the subprocess in a new session\n process = subprocess.Popen(command, shell=True, start_new_session=True, cwd=server_directory)\n process.wait()\n except KeyboardInterrupt:\n # Handle CTRL-C\n print(\"Terminating the server...\")\n process.terminate()\n try:\n process.wait(timeout=5)\n except subprocess.TimeoutExpired:\n process.kill()\n print(\"Server terminated with kill()\")\n sys.exit(0)" }, { "identifier": "open_folder", "path": "memgpt/cli/cli.py", "snippet": "def open_folder():\n \"\"\"Open a folder viewer of the MemGPT home directory\"\"\"\n try:\n print(f\"Opening home folder: {MEMGPT_DIR}\")\n open_folder_in_explorer(MEMGPT_DIR)\n except Exception as e:\n print(f\"Failed to open folder with system viewer, error:\\n{e}\")" }, { "identifier": "quickstart", "path": "memgpt/cli/cli.py", "snippet": "def quickstart(\n backend: QuickstartChoice = typer.Option(\"memgpt\", help=\"Quickstart setup backend\"),\n latest: bool = typer.Option(False, \"--latest\", help=\"Use --latest to pull the latest config from online\"),\n debug: bool = typer.Option(False, \"--debug\", help=\"Use --debug to enable debugging output\"),\n terminal: bool = True,\n):\n \"\"\"Set the base config file with a single command\"\"\"\n\n # setup logger\n utils.DEBUG = debug\n logging.getLogger().setLevel(logging.CRITICAL)\n if debug:\n logging.getLogger().setLevel(logging.DEBUG)\n\n # make sure everything is set up properly\n MemGPTConfig.create_config_dir()\n credentials = MemGPTCredentials.load()\n\n 
config_was_modified = False\n if backend == QuickstartChoice.memgpt_hosted:\n # if latest, try to pull the config from the repo\n # fallback to using local\n if latest:\n # Download the latest memgpt hosted config\n url = \"https://raw.githubusercontent.com/cpacker/MemGPT/main/memgpt/configs/memgpt_hosted.json\"\n response = requests.get(url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Parse the response content as JSON\n config = response.json()\n # Output a success message and the first few items in the dictionary as a sample\n printd(\"JSON config file downloaded successfully.\")\n config_was_modified = set_config_with_dict(config)\n else:\n typer.secho(f\"Failed to download config from {url}. Status code: {response.status_code}\", fg=typer.colors.RED)\n\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"memgpt_hosted.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded backup config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Backup config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n else:\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"memgpt_hosted.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n\n elif backend == QuickstartChoice.openai:\n # Make sure we have an API key\n api_key = os.getenv(\"OPENAI_API_KEY\")\n while api_key is None or len(api_key) == 0:\n # Ask for API key as input\n api_key = questionary.password(\"Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):\").ask()\n credentials.openai_key = api_key\n credentials.save()\n\n # if latest, try to pull the config from the repo\n # fallback to using local\n if latest:\n url = \"https://raw.githubusercontent.com/cpacker/MemGPT/main/memgpt/configs/openai.json\"\n response = requests.get(url)\n\n # Check if the request was successful\n if response.status_code == 200:\n # Parse the response content as JSON\n config = response.json()\n # Output a success message and the first few items in the dictionary as a sample\n print(\"JSON config file downloaded successfully.\")\n config_was_modified = set_config_with_dict(config)\n else:\n typer.secho(f\"Failed to download config from {url}. 
Status code: {response.status_code}\", fg=typer.colors.RED)\n\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"openai.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded backup config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Backup config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n else:\n # Load the file from the relative path\n script_dir = os.path.dirname(__file__) # Get the directory where the script is located\n backup_config_path = os.path.join(script_dir, \"..\", \"configs\", \"openai.json\")\n try:\n with open(backup_config_path, \"r\") as file:\n backup_config = json.load(file)\n printd(\"Loaded config file successfully.\")\n config_was_modified = set_config_with_dict(backup_config)\n except FileNotFoundError:\n typer.secho(f\"Config file not found at {backup_config_path}\", fg=typer.colors.RED)\n return\n\n else:\n raise NotImplementedError(backend)\n\n # 'terminal' = quickstart was run alone, in which case we should guide the user on the next command\n if terminal:\n if config_was_modified:\n typer.secho('⚡ Run \"memgpt run\" to create an agent with the new config.', fg=typer.colors.YELLOW)\n else:\n typer.secho('⚡ Run \"memgpt run\" to create an agent.', fg=typer.colors.YELLOW)" }, { "identifier": "migrate", "path": "memgpt/cli/cli.py", "snippet": "def migrate():\n \"\"\"Migrate old agents (pre 0.2.12) to the new database system\"\"\"\n migrate_all_agents()\n migrate_all_sources()" }, { "identifier": "configure", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef configure():\n \"\"\"Updates default MemGPT configurations\"\"\"\n\n # check credentials\n credentials = MemGPTCredentials.load()\n openai_key = get_openai_credentials()\n azure_creds = get_azure_credentials()\n\n MemGPTConfig.create_config_dir()\n\n # Will pre-populate with defaults, or what the user previously set\n config = MemGPTConfig.load()\n try:\n model_endpoint_type, model_endpoint = configure_llm_endpoint(\n config=config,\n credentials=credentials,\n )\n model, model_wrapper, context_window = configure_model(\n config=config,\n credentials=credentials,\n model_endpoint_type=model_endpoint_type,\n model_endpoint=model_endpoint,\n )\n embedding_endpoint_type, embedding_endpoint, embedding_dim, embedding_model = configure_embedding_endpoint(\n config=config,\n credentials=credentials,\n )\n default_preset, default_persona, default_human, default_agent = configure_cli(\n config=config,\n credentials=credentials,\n )\n archival_storage_type, archival_storage_uri, archival_storage_path = configure_archival_storage(\n config=config,\n credentials=credentials,\n )\n recall_storage_type, recall_storage_uri, recall_storage_path = configure_recall_storage(\n config=config,\n credentials=credentials,\n )\n except ValueError as e:\n typer.secho(str(e), fg=typer.colors.RED)\n return\n\n # openai key might have gotten added along the way\n openai_key = credentials.openai_key if credentials.openai_key is not None else openai_key\n\n # TODO: remove most of this (deplicated with User table)\n config = MemGPTConfig(\n default_llm_config=LLMConfig(\n model=model,\n model_endpoint=model_endpoint,\n model_endpoint_type=model_endpoint_type,\n model_wrapper=model_wrapper,\n 
context_window=context_window,\n ),\n default_embedding_config=EmbeddingConfig(\n embedding_endpoint_type=embedding_endpoint_type,\n embedding_endpoint=embedding_endpoint,\n embedding_dim=embedding_dim,\n embedding_model=embedding_model,\n ),\n # cli configs\n preset=default_preset,\n persona=default_persona,\n human=default_human,\n agent=default_agent,\n # storage\n archival_storage_type=archival_storage_type,\n archival_storage_uri=archival_storage_uri,\n archival_storage_path=archival_storage_path,\n # recall storage\n recall_storage_type=recall_storage_type,\n recall_storage_uri=recall_storage_uri,\n recall_storage_path=recall_storage_path,\n # metadata storage (currently forced to match recall storage)\n metadata_storage_type=recall_storage_type,\n metadata_storage_uri=recall_storage_uri,\n metadata_storage_path=recall_storage_path,\n )\n\n typer.secho(f\"📖 Saving config to {config.config_path}\", fg=typer.colors.GREEN)\n config.save()\n\n # create user records\n ms = MetadataStore(config)\n user_id = uuid.UUID(config.anon_clientid)\n user = User(\n id=uuid.UUID(config.anon_clientid),\n default_preset=default_preset,\n default_persona=default_persona,\n default_human=default_human,\n default_agent=default_agent,\n )\n if ms.get_user(user_id):\n # update user\n ms.update_user(user)\n else:\n ms.create_user(user)" }, { "identifier": "list", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef list(arg: Annotated[ListChoice, typer.Argument]):\n config = MemGPTConfig.load()\n ms = MetadataStore(config)\n user_id = uuid.UUID(config.anon_clientid)\n if arg == ListChoice.agents:\n \"\"\"List all agents\"\"\"\n table = PrettyTable()\n table.field_names = [\"Name\", \"Model\", \"Persona\", \"Human\", \"Data Source\", \"Create Time\"]\n for agent in tqdm(ms.list_agents(user_id=user_id)):\n source_ids = ms.list_attached_sources(agent_id=agent.id)\n source_names = [ms.get_source(source_id=source_id).name for source_id in source_ids]\n table.add_row(\n [\n agent.name,\n agent.llm_config.model,\n agent.persona,\n agent.human,\n \",\".join(source_names),\n utils.format_datetime(agent.created_at),\n ]\n )\n print(table)\n elif arg == ListChoice.humans:\n \"\"\"List all humans\"\"\"\n table = PrettyTable()\n table.field_names = [\"Name\", \"Text\"]\n for human_file in utils.list_human_files():\n text = open(human_file, \"r\").read()\n name = os.path.basename(human_file).replace(\"txt\", \"\")\n table.add_row([name, text])\n print(table)\n elif arg == ListChoice.personas:\n \"\"\"List all personas\"\"\"\n table = PrettyTable()\n table.field_names = [\"Name\", \"Text\"]\n for persona_file in utils.list_persona_files():\n print(persona_file)\n text = open(persona_file, \"r\").read()\n name = os.path.basename(persona_file).replace(\".txt\", \"\")\n table.add_row([name, text])\n print(table)\n elif arg == ListChoice.sources:\n \"\"\"List all data sources\"\"\"\n\n # create table\n table = PrettyTable()\n table.field_names = [\"Name\", \"Created At\", \"Agents\"]\n # TODO: eventually look accross all storage connections\n # TODO: add data source stats\n # TODO: connect to agents\n\n # get all sources\n for source in ms.list_sources(user_id=user_id):\n # get attached agents\n agent_ids = ms.list_attached_agents(source_id=source.id)\n agent_names = [ms.get_agent(agent_id=agent_id).name for agent_id in agent_ids]\n\n table.add_row([source.name, utils.format_datetime(source.created_at), \",\".join(agent_names)])\n\n print(table)\n else:\n raise ValueError(f\"Unknown argument {arg}\")" }, { 
"identifier": "add", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef add(\n option: str, # [human, persona]\n name: str = typer.Option(help=\"Name of human/persona\"),\n text: str = typer.Option(None, help=\"Text of human/persona\"),\n filename: str = typer.Option(None, \"-f\", help=\"Specify filename\"),\n):\n \"\"\"Add a person/human\"\"\"\n\n if option == \"persona\":\n directory = os.path.join(MEMGPT_DIR, \"personas\")\n elif option == \"human\":\n directory = os.path.join(MEMGPT_DIR, \"humans\")\n else:\n raise ValueError(f\"Unknown kind {option}\")\n\n if filename:\n assert text is None, f\"Cannot provide both filename and text\"\n # copy file to directory\n shutil.copyfile(filename, os.path.join(directory, name))\n if text:\n assert filename is None, f\"Cannot provide both filename and text\"\n # write text to file\n with open(os.path.join(directory, name), \"w\") as f:\n f.write(text)" }, { "identifier": "delete", "path": "memgpt/cli/cli_config.py", "snippet": "@app.command()\ndef delete(option: str, name: str):\n \"\"\"Delete a source from the archival memory.\"\"\"\n\n config = MemGPTConfig.load()\n user_id = uuid.UUID(config.anon_clientid)\n ms = MetadataStore(config)\n assert ms.get_user(user_id=user_id), f\"User {user_id} does not exist\"\n\n try:\n # delete from metadata\n if option == \"source\":\n # delete metadata\n source = ms.get_source(source_name=name, user_id=user_id)\n ms.delete_source(source_id=source.id)\n\n # delete from passages\n conn = StorageConnector.get_storage_connector(TableType.PASSAGES, config, user_id=user_id)\n conn.delete({\"data_source\": name})\n\n assert (\n conn.get_all({\"data_source\": name}) == []\n ), f\"Expected no passages with source {name}, but got {conn.get_all({'data_source': name})}\"\n\n # TODO: should we also delete from agents?\n elif option == \"agent\":\n agent = ms.get_agent(agent_name=name, user_id=user_id)\n\n # recall memory\n recall_conn = StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id=user_id, agent_id=agent.id)\n recall_conn.delete({\"agent_id\": agent.id})\n\n # archival memory\n archival_conn = StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id=user_id, agent_id=agent.id)\n archival_conn.delete({\"agent_id\": agent.id})\n\n # metadata\n ms.delete_agent(agent_id=agent.id)\n\n else:\n raise ValueError(f\"Option {option} not implemented\")\n\n typer.secho(f\"Deleted source '{name}'\", fg=typer.colors.GREEN)\n\n except Exception as e:\n typer.secho(f\"Failed to deleted source '{name}'\\n{e}\", fg=typer.colors.RED)" }, { "identifier": "app", "path": "memgpt/cli/cli_load.py", "snippet": "def insert_passages_into_source(passages: List[Passage], source_name: str, user_id: uuid.UUID, config: MemGPTConfig):\ndef insert_passages_into_source(passages: List[Passage], source_name: str, user_id: uuid.UUID, config: MemGPTConfig):\ndef store_docs(name, docs, user_id=None, show_progress=True):\ndef load_index(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n dir: str = typer.Option(help=\"Path to directory containing index.\"),\n user_id: uuid.UUID = None,\n):\ndef load_directory(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n input_dir: str = typer.Option(None, help=\"Path to directory containing dataset.\"),\n input_files: List[str] = typer.Option(None, help=\"List of paths to files containing dataset.\"),\n recursive: bool = typer.Option(False, help=\"Recursively search for files in directory.\"),\n extensions: str = 
typer.Option(default_extensions, help=\"Comma separated list of file extensions to load\"),\n user_id: str = typer.Option(None, help=\"User ID to associate with dataset.\"),\n):\ndef load_webpage(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n urls: List[str] = typer.Option(None, help=\"List of urls to load.\"),\n):\ndef load_database(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n query: str = typer.Option(help=\"Database query.\"),\n dump_path: str = typer.Option(None, help=\"Path to dump file.\"),\n scheme: str = typer.Option(None, help=\"Database scheme.\"),\n host: str = typer.Option(None, help=\"Database host.\"),\n port: int = typer.Option(None, help=\"Database port.\"),\n user: str = typer.Option(None, help=\"Database user.\"),\n password: str = typer.Option(None, help=\"Database password.\"),\n dbname: str = typer.Option(None, help=\"Database name.\"),\n):\ndef load_vector_database(\n name: str = typer.Option(help=\"Name of dataset to load.\"),\n uri: str = typer.Option(help=\"Database URI.\"),\n table_name: str = typer.Option(help=\"Name of table containing data.\"),\n text_column: str = typer.Option(help=\"Name of column containing text.\"),\n embedding_column: str = typer.Option(help=\"Name of column containing embedding.\"),\n user_id: uuid.UUID = None,\n):" }, { "identifier": "StorageConnector", "path": "memgpt/agent_store/storage.py", "snippet": "class StorageConnector:\n \"\"\"Defines a DB connection that is user-specific to access data: Documents, Passages, Archival/Recall Memory\"\"\"\n\n def __init__(self, table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n self.user_id = user_id\n self.agent_id = agent_id\n self.table_type = table_type\n\n # get object type\n if table_type == TableType.ARCHIVAL_MEMORY:\n self.type = Passage\n self.table_name = ARCHIVAL_TABLE_NAME\n elif table_type == TableType.RECALL_MEMORY:\n self.type = Message\n self.table_name = RECALL_TABLE_NAME\n elif table_type == TableType.DOCUMENTS:\n self.type = Document\n self.table_name == DOCUMENT_TABLE_NAME\n elif table_type == TableType.PASSAGES:\n self.type = Passage\n self.table_name = PASSAGE_TABLE_NAME\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n printd(f\"Using table name {self.table_name}\")\n\n # setup base filters for agent-specific tables\n if self.table_type == TableType.ARCHIVAL_MEMORY or self.table_type == TableType.RECALL_MEMORY:\n # agent-specific table\n assert agent_id is not None, \"Agent ID must be provided for agent-specific tables\"\n self.filters = {\"user_id\": self.user_id, \"agent_id\": self.agent_id}\n elif self.table_type == TableType.PASSAGES or self.table_type == TableType.DOCUMENTS:\n # setup base filters for user-specific tables\n assert agent_id is None, \"Agent ID must not be provided for user-specific tables\"\n self.filters = {\"user_id\": self.user_id}\n else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n def get_filters(self, filters: Optional[Dict] = {}):\n # get all filters for query\n if filters is not None:\n filter_conditions = {**self.filters, **filters}\n else:\n filter_conditions = self.filters\n return filter_conditions\n\n @staticmethod\n def get_storage_connector(table_type: TableType, config: MemGPTConfig, user_id, agent_id=None):\n if table_type == TableType.ARCHIVAL_MEMORY or table_type == TableType.PASSAGES:\n storage_type = config.archival_storage_type\n elif table_type == TableType.RECALL_MEMORY:\n storage_type = config.recall_storage_type\n 
else:\n raise ValueError(f\"Table type {table_type} not implemented\")\n\n if storage_type == \"postgres\":\n from memgpt.agent_store.db import PostgresStorageConnector\n\n return PostgresStorageConnector(table_type, config, user_id, agent_id)\n elif storage_type == \"chroma\":\n from memgpt.agent_store.chroma import ChromaStorageConnector\n\n return ChromaStorageConnector(table_type, config, user_id, agent_id)\n\n # TODO: add back\n # elif storage_type == \"lancedb\":\n # from memgpt.agent_store.db import LanceDBConnector\n\n # return LanceDBConnector(agent_config=agent_config, table_type=table_type)\n\n elif storage_type == \"sqlite\":\n from memgpt.agent_store.db import SQLLiteStorageConnector\n\n return SQLLiteStorageConnector(table_type, config, user_id, agent_id)\n\n else:\n raise NotImplementedError(f\"Storage type {storage_type} not implemented\")\n\n @staticmethod\n def get_archival_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.ARCHIVAL_MEMORY, config, user_id, agent_id)\n\n @staticmethod\n def get_recall_storage_connector(user_id, agent_id):\n config = MemGPTConfig.load()\n return StorageConnector.get_storage_connector(TableType.RECALL_MEMORY, config, user_id, agent_id)\n\n @abstractmethod\n def get_filters(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def get_all_paginated(self, filters: Optional[Dict] = {}, page_size: Optional[int] = 1000) -> Iterator[List[Record]]:\n pass\n\n @abstractmethod\n def get_all(self, filters: Optional[Dict] = {}, limit=10) -> List[Record]:\n pass\n\n @abstractmethod\n def get(self, id: str) -> Optional[Record]:\n pass\n\n @abstractmethod\n def size(self, filters: Optional[Dict] = {}) -> int:\n pass\n\n @abstractmethod\n def insert(self, record: Record):\n pass\n\n @abstractmethod\n def insert_many(self, records: List[Record], show_progress=False):\n pass\n\n @abstractmethod\n def query(self, query: str, query_vec: List[float], top_k: int = 10, filters: Optional[Dict] = {}) -> List[Record]:\n pass\n\n @abstractmethod\n def query_date(self, start_date, end_date):\n pass\n\n @abstractmethod\n def query_text(self, query):\n pass\n\n @abstractmethod\n def delete_table(self):\n pass\n\n @abstractmethod\n def delete(self, filters: Optional[Dict] = {}):\n pass\n\n @abstractmethod\n def save(self):\n pass" }, { "identifier": "TableType", "path": "memgpt/agent_store/storage.py", "snippet": "class TableType:\n ARCHIVAL_MEMORY = \"archival_memory\" # recall memory table: memgpt_agent_{agent_id}\n RECALL_MEMORY = \"recall_memory\" # archival memory table: memgpt_agent_recall_{agent_id}\n PASSAGES = \"passages\" # TODO\n DOCUMENTS = \"documents\" # TODO" }, { "identifier": "MetadataStore", "path": "memgpt/metadata.py", "snippet": "class MetadataStore:\n def __init__(self, config: MemGPTConfig):\n # TODO: get DB URI or path\n if config.metadata_storage_type == \"postgres\":\n self.uri = config.metadata_storage_uri\n elif config.metadata_storage_type == \"sqlite\":\n path = os.path.join(config.metadata_storage_path, \"sqlite.db\")\n self.uri = f\"sqlite:///{path}\"\n else:\n raise ValueError(f\"Invalid metadata storage type: {config.metadata_storage_type}\")\n\n # TODO: check to see if table(s) need to be greated or not\n\n self.engine = create_engine(self.uri)\n Base.metadata.create_all(\n self.engine, tables=[UserModel.__table__, AgentModel.__table__, SourceModel.__table__, AgentSourceMappingModel.__table__]\n )\n session_maker = sessionmaker(bind=self.engine)\n 
self.session = session_maker()\n\n @enforce_types\n def create_agent(self, agent: AgentState):\n # insert into agent table\n # make sure agent.name does not already exist for user user_id\n if self.session.query(AgentModel).filter(AgentModel.name == agent.name).filter(AgentModel.user_id == agent.user_id).count() > 0:\n raise ValueError(f\"Agent with name {agent.name} already exists\")\n self.session.add(AgentModel(**vars(agent)))\n self.session.commit()\n\n @enforce_types\n def create_source(self, source: Source):\n # make sure source.name does not already exist for user\n if (\n self.session.query(SourceModel).filter(SourceModel.name == source.name).filter(SourceModel.user_id == source.user_id).count()\n > 0\n ):\n raise ValueError(f\"Source with name {source.name} already exists\")\n self.session.add(SourceModel(**vars(source)))\n self.session.commit()\n\n @enforce_types\n def create_user(self, user: User):\n if self.session.query(UserModel).filter(UserModel.id == user.id).count() > 0:\n raise ValueError(f\"User with id {user.id} already exists\")\n self.session.add(UserModel(**vars(user)))\n self.session.commit()\n\n @enforce_types\n def update_agent(self, agent: AgentState):\n self.session.query(AgentModel).filter(AgentModel.id == agent.id).update(vars(agent))\n self.session.commit()\n\n @enforce_types\n def update_user(self, user: User):\n self.session.query(UserModel).filter(UserModel.id == user.id).update(vars(user))\n self.session.commit()\n\n @enforce_types\n def update_source(self, source: Source):\n self.session.query(SourceModel).filter(SourceModel.id == source.id).update(vars(source))\n self.session.commit()\n\n @enforce_types\n def delete_agent(self, agent_id: uuid.UUID):\n self.session.query(AgentModel).filter(AgentModel.id == agent_id).delete()\n self.session.commit()\n\n @enforce_types\n def delete_source(self, source_id: uuid.UUID):\n # delete from sources table\n self.session.query(SourceModel).filter(SourceModel.id == source_id).delete()\n\n # delete any mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def delete_user(self, user_id: uuid.UUID):\n # delete from users table\n self.session.query(UserModel).filter(UserModel.id == user_id).delete()\n\n # delete associated agents\n self.session.query(AgentModel).filter(AgentModel.user_id == user_id).delete()\n\n # delete associated sources\n self.session.query(SourceModel).filter(SourceModel.user_id == user_id).delete()\n\n # delete associated mappings\n self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.user_id == user_id).delete()\n\n self.session.commit()\n\n @enforce_types\n def list_agents(self, user_id: uuid.UUID) -> List[AgentState]:\n results = self.session.query(AgentModel).filter(AgentModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def list_sources(self, user_id: uuid.UUID) -> List[Source]:\n results = self.session.query(SourceModel).filter(SourceModel.user_id == user_id).all()\n return [r.to_record() for r in results]\n\n @enforce_types\n def get_agent(\n self, agent_id: Optional[uuid.UUID] = None, agent_name: Optional[str] = None, user_id: Optional[uuid.UUID] = None\n ) -> Optional[AgentState]:\n if agent_id:\n results = self.session.query(AgentModel).filter(AgentModel.id == agent_id).all()\n else:\n assert agent_name is not None and user_id is not None, \"Must provide either agent_id or agent_name\"\n results = 
self.session.query(AgentModel).filter(AgentModel.name == agent_name).filter(AgentModel.user_id == user_id).all()\n\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\" # should only be one result\n return results[0].to_record()\n\n @enforce_types\n def get_user(self, user_id: uuid.UUID) -> Optional[User]:\n results = self.session.query(UserModel).filter(UserModel.id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n @enforce_types\n def get_source(\n self, source_id: Optional[uuid.UUID] = None, user_id: Optional[uuid.UUID] = None, source_name: Optional[str] = None\n ) -> Optional[Source]:\n if source_id:\n results = self.session.query(SourceModel).filter(SourceModel.id == source_id).all()\n else:\n assert user_id is not None and source_name is not None\n results = self.session.query(SourceModel).filter(SourceModel.name == source_name).filter(SourceModel.user_id == user_id).all()\n if len(results) == 0:\n return None\n assert len(results) == 1, f\"Expected 1 result, got {len(results)}\"\n return results[0].to_record()\n\n # agent source metadata\n @enforce_types\n def attach_source(self, user_id: uuid.UUID, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.add(AgentSourceMappingModel(user_id=user_id, agent_id=agent_id, source_id=source_id))\n self.session.commit()\n\n @enforce_types\n def list_attached_sources(self, agent_id: uuid.UUID) -> List[Column]:\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.agent_id == agent_id).all()\n return [r.source_id for r in results]\n\n @enforce_types\n def list_attached_agents(self, source_id: uuid.UUID):\n results = self.session.query(AgentSourceMappingModel).filter(AgentSourceMappingModel.source_id == source_id).all()\n return [r.agent_id for r in results]\n\n @enforce_types\n def detach_source(self, agent_id: uuid.UUID, source_id: uuid.UUID):\n self.session.query(AgentSourceMappingModel).filter(\n AgentSourceMappingModel.agent_id == agent_id, AgentSourceMappingModel.source_id == source_id\n ).delete()\n self.session.commit()" }, { "identifier": "save_agent", "path": "memgpt/metadata.py", "snippet": "def save_agent(agent: Agent, ms: MetadataStore):\n \"\"\"Save agent to metadata store\"\"\"\n\n agent.update_state()\n agent_state = agent.agent_state\n\n if ms.get_agent(agent_id=agent_state.id):\n ms.update_agent(agent_state)\n else:\n ms.create_agent(agent_state)" } ]
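The context list above closes with the metadata-layer helpers. Below is a minimal sketch of how they are typically driven, assuming a MemGPT checkout matching these snippets; the agent and human names are hypothetical:

```python
# Minimal sketch (not taken from the record) driving the MetadataStore
# snippet above; "my_agent" and "cs_phd" are hypothetical names, and the
# imports assume a MemGPT checkout matching these snippets.
import uuid

from memgpt.config import MemGPTConfig
from memgpt.metadata import MetadataStore

config = MemGPTConfig.load()
ms = MetadataStore(config)
user_id = uuid.UUID(config.anon_clientid)

# look up an agent by name for this user, as run() and attach() do above
agent_state = ms.get_agent(agent_name="my_agent", user_id=user_id)
if agent_state is not None:
    agent_state.human = "cs_phd"   # mutate one field...
    ms.update_agent(agent_state)   # ...and persist it, as run() does above
```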
import shutil import configparser import uuid import logging import glob import os import sys import pickle import traceback import json import questionary import typer import memgpt.agent as agent import memgpt.system as system import memgpt.constants as constants import memgpt.errors as errors from rich.console import Console from prettytable import PrettyTable from memgpt.log import logger from memgpt.interface import CLIInterface as interface # for printing to terminal from memgpt.config import MemGPTConfig from memgpt.cli.cli import run, attach, version, server, open_folder, quickstart, migrate from memgpt.cli.cli_config import configure, list, add, delete from memgpt.cli.cli_load import app as load_app from memgpt.agent_store.storage import StorageConnector, TableType from memgpt.metadata import MetadataStore, save_agent
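These are the imports of the target file itself. One plausible way (an assumption, not necessarily the benchmark's own recipe) to assemble a completion prompt from a record shaped like this one; the top-level field names are assumptions, while the per-snippet keys ("path", "snippet") match the context entries visible above:

```python
# Hedged sketch: assemble a repo-level completion prompt from one record.
# Top-level field names ("context", "import_statement", "cropped_code")
# are assumptions; "path" and "snippet" match the keys in the record above.
def build_prompt(record: dict) -> str:
    context = "\n\n".join(
        f"# {c['path']}\n{c['snippet']}" for c in record["context"]
    )
    return f"{context}\n\n{record['import_statement']}\n\n{record['cropped_code']}"

# The model's task is then to produce the record's gold next line.
```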
17,401
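The bare value above reads as this record's token count. A quick way to reproduce such counts, assuming an OpenAI-style tokenizer via tiktoken (the benchmark's actual tokenizer is an assumption):

```python
# Hedged sketch: rough token count for a prompt string using tiktoken;
# the choice of cl100k_base is an assumption, not confirmed by the record.
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")

def token_count(text: str) -> int:
    return len(enc.encode(text))

print(token_count("from memgpt.config import MemGPTConfig"))  # small example
```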
console = Console() app = typer.Typer(pretty_exceptions_enable=False) app.command(name="run")(run) app.command(name="version")(version) app.command(name="attach")(attach)
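The cropped code registers already-defined functions as CLI commands. A self-contained sketch of the same Typer pattern, runnable on its own:

```python
# Self-contained sketch of the registration pattern used above:
# Typer's app.command(name=...) returns a decorator, so calling it with an
# existing function registers that function as a CLI command.
import typer

app = typer.Typer(pretty_exceptions_enable=False)

def version():
    print("0.0.0")

app.command(name="version")(version)

if __name__ == "__main__":
    app()
```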
app.command(name="configure")(configure)
10
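If this integer is the position of the gold snippet inside the record's context list (an assumption based on its placement), a snippet retriever can be scored with a simple recall@k:

```python
# Sketch: recall@k for a context-snippet retriever, assuming the integer
# above indexes the gold snippet within the record's context list.
def recall_at_k(ranked: list[int], gold: int, k: int) -> float:
    return 1.0 if gold in ranked[:k] else 0.0

print(recall_at_k([3, 10, 7], gold=10, k=2))  # -> 1.0
```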
2023-10-11 07:38:37+00:00
24k
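With this record complete, a dump of such records can be streamed line by line; a minimal sketch, assuming a JSONL file and the field names used in the sketches above:

```python
# Hedged sketch: stream records from a JSONL dump of this dataset;
# the file name and exact field names are assumptions.
import json

with open("completion_records.jsonl") as f:
    for raw in f:
        record = json.loads(raw)
        print(record["repo_name"], record["next_line"])
```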
PixArt-alpha/PixArt-alpha
train_scripts/train_pixart_lcm.py
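The context list that follows bundles this training script's utilities (diffusion setup, checkpointing, LR scheduling, and aspect-ratio-aware batch sampling). As a standalone illustration of the bucketing idea implemented by the AspectRatioBatchSampler snippet further below, here is a minimal sketch; every name in it is hypothetical:

```python
# Standalone sketch of aspect-ratio bucketing: group sample indices whose
# h/w ratio maps to the same predefined bucket, and emit full batches only.
from collections import defaultdict

def bucket_batches(sizes, ratios, batch_size):
    """sizes: list of (height, width); ratios: predefined float ratios."""
    buckets = defaultdict(list)
    for idx, (h, w) in enumerate(sizes):
        closest = min(ratios, key=lambda r: abs(r - h / w))
        buckets[closest].append(idx)
        if len(buckets[closest]) == batch_size:
            yield buckets[closest][:]
            buckets[closest].clear()

batches = list(bucket_batches([(512, 512), (768, 512), (512, 512)], [1.0, 1.5], 2))
# -> [[0, 2]]; the lone 1.5-ratio sample never fills a batch here
```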
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "save_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def save_checkpoint(work_dir,\n epoch,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n keep_last=False,\n step=None,\n ):\n os.makedirs(work_dir, exist_ok=True)\n state_dict = dict(state_dict=model.state_dict())\n if model_ema is not None:\n state_dict['state_dict_ema'] = model_ema.state_dict()\n if optimizer is not None:\n state_dict['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n state_dict['scheduler'] = lr_scheduler.state_dict()\n if epoch is not None:\n state_dict['epoch'] = epoch\n file_path = os.path.join(work_dir, f\"epoch_{epoch}.pth\")\n if step is not None:\n file_path = file_path.split('.pth')[0] + f\"_step_{step}.pth\"\n logger = get_root_logger()\n torch.save(state_dict, file_path)\n logger.info(f'Saved checkpoint of epoch {epoch} to {file_path.format(epoch)}.')\n if keep_last:\n for i in range(epoch):\n previous_ckgt = file_path.format(i)\n if os.path.exists(previous_ckgt):\n os.remove(previous_ckgt)" }, { "identifier": "load_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def load_checkpoint(checkpoint,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n load_ema=False,\n resume_optimizer=True,\n resume_lr_scheduler=True\n ):\n assert isinstance(checkpoint, str)\n ckpt_file = checkpoint\n checkpoint = torch.load(ckpt_file, map_location=\"cpu\")\n\n state_dict_keys = ['pos_embed', 'base_model.pos_embed', 'model.pos_embed']\n for key in state_dict_keys:\n if key in checkpoint['state_dict']:\n del checkpoint['state_dict'][key]\n if 'state_dict_ema' in checkpoint and key in checkpoint['state_dict_ema']:\n del checkpoint['state_dict_ema'][key]\n break\n\n if load_ema:\n state_dict = checkpoint['state_dict_ema']\n else:\n state_dict = checkpoint.get('state_dict', checkpoint) # to be compatible with the official checkpoint\n # model.load_state_dict(state_dict)\n missing, unexpect = model.load_state_dict(state_dict, strict=False)\n if model_ema is not None:\n model_ema.load_state_dict(checkpoint['state_dict_ema'], strict=False)\n if optimizer is not None and resume_optimizer:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None and resume_lr_scheduler:\n lr_scheduler.load_state_dict(checkpoint['scheduler'])\n logger 
= get_root_logger()\n if optimizer is not None:\n epoch = checkpoint.get('epoch', re.match(r'.*epoch_(\\d*).*.pth', ckpt_file).group()[0])\n logger.info(f'Resume checkpoint of epoch {epoch} from {ckpt_file}. Load ema: {load_ema}, '\n f'resume optimizer: {resume_optimizer}, resume lr scheduler: {resume_lr_scheduler}.')\n return epoch, missing, unexpect\n logger.info(f'Load checkpoint from {ckpt_file}. Load ema: {load_ema}.')\n return missing, unexpect" }, { "identifier": "synchronize", "path": "diffusion/utils/dist_utils.py", "snippet": "def synchronize():\n \"\"\"\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n \"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n dist.barrier()" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. 
time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "build_model", "path": "diffusion/model/builder.py", "snippet": "def build_model(cfg, use_grad_checkpoint=False, use_fp32_attention=False, gc_step=1, **kwargs):\n if isinstance(cfg, str):\n cfg = dict(type=cfg)\n model = MODELS.build(cfg, default_args=kwargs)\n if use_grad_checkpoint:\n set_grad_checkpoint(model, use_fp32_attention=use_fp32_attention, gc_step=gc_step)\n return model" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n 
self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == 
self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" }, { "identifier": "LCMScheduler", "path": "diffusion/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic\n methods the library implements for all schedulers such as loading and saving.\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n beta_schedule (`str`, defaults to `\"linear\"`):\n The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, *optional*):\n Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.\n clip_sample (`bool`, defaults to `True`):\n Clip the predicted sample for numerical stability.\n clip_sample_range (`float`, defaults to 1.0):\n The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.\n set_alpha_to_one (`bool`, defaults to `True`):\n Each diffusion step uses the alphas product value at that step and at the previous one. For the final step\n there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the alpha value at step 0.\n steps_offset (`int`, defaults to 0):\n An offset added to the inference steps. You can use a combination of `offset=1` and\n `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable\n Diffusion.\n prediction_type (`str`, defaults to `epsilon`, *optional*):\n Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),\n `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen\n Video](https://imagen.research.google/video/paper.pdf) paper).\n thresholding (`bool`, defaults to `False`):\n Whether to use the \"dynamic thresholding\" method. This is unsuitable for latent-space diffusion models such\n as Stable Diffusion.\n dynamic_thresholding_ratio (`float`, defaults to 0.995):\n The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.\n sample_max_value (`float`, defaults to 1.0):\n The threshold value for dynamic thresholding. Valid only when `thresholding=True`.\n timestep_spacing (`str`, defaults to `\"leading\"`):\n The way the timesteps should be scaled. 
Refer to Table 2 of the [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.\n rescale_betas_zero_snr (`bool`, defaults to `False`):\n Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and\n dark samples instead of limiting it to samples with medium brightness. Loosely related to\n [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).\n \"\"\"\n\n # _compatibles = [e.name for e in KarrasDiffusionSchedulers]\n order = 1\n\n @register_to_config\n def __init__(\n self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False,\n ):\n if trained_betas is not None:\n self.betas = torch.tensor(trained_betas, dtype=torch.float32)\n elif beta_schedule == \"linear\":\n self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)\n elif beta_schedule == \"scaled_linear\":\n # this schedule is very specific to the latent diffusion model.\n self.betas = (\n torch.linspace(beta_start ** 0.5, beta_end ** 0.5, num_train_timesteps, dtype=torch.float32) ** 2\n )\n elif beta_schedule == \"squaredcos_cap_v2\":\n # Glide cosine schedule\n self.betas = betas_for_alpha_bar(num_train_timesteps)\n else:\n raise NotImplementedError(f\"{beta_schedule} does is not implemented for {self.__class__}\")\n\n # Rescale for zero SNR\n if rescale_betas_zero_snr:\n self.betas = rescale_zero_terminal_snr(self.betas)\n\n self.alphas = 1.0 - self.betas\n self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)\n\n # At every step in ddim, we are looking into the previous alphas_cumprod\n # For the final step, there is no previous alphas_cumprod because we are already at 0\n # `set_alpha_to_one` decides whether we set this parameter simply to one or\n # whether we use the final alpha of the \"non-previous\" one.\n self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]\n\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n\n # setable values\n self.num_inference_steps = None\n self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))\n\n def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n \"\"\"\n Ensures interchangeability with schedulers that need to scale the denoising model input depending on the\n current timestep.\n Args:\n sample (`torch.FloatTensor`):\n The input sample.\n timestep (`int`, *optional*):\n The current timestep in the diffusion chain.\n Returns:\n `torch.FloatTensor`:\n A scaled input sample.\n \"\"\"\n return sample\n\n def _get_variance(self, timestep, prev_timestep):\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n variance = 
(beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)\n\n return variance\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample\n def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n \"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the\n prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by\n s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing\n pixels from saturation at each step. We find that dynamic thresholding results in significantly better\n photorealism as well as better image-text alignment, especially when using very large guidance weights.\"\n https://arxiv.org/abs/2205.11487\n \"\"\"\n dtype = sample.dtype\n batch_size, channels, height, width = sample.shape\n\n if dtype not in (torch.float32, torch.float64):\n sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half\n\n # Flatten sample for doing quantile calculation along each image\n sample = sample.reshape(batch_size, channels * height * width)\n\n abs_sample = sample.abs() # \"a certain percentile absolute pixel value\"\n\n s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)\n s = torch.clamp(\n s, min=1, max=self.config.sample_max_value\n ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]\n\n s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0\n sample = torch.clamp(sample, -s, s) / s # \"we threshold xt0 to the range [-s, s] and then divide by s\"\n\n sample = sample.reshape(batch_size, channels, height, width)\n sample = sample.to(dtype)\n\n return sample\n\n def set_timesteps(self, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n \"\"\"\n\n if num_inference_steps > self.config.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:\"\n f\" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.config.num_train_timesteps} timesteps.\"\n )\n\n self.num_inference_steps = num_inference_steps\n\n # LCM Timesteps Setting: # Linear Spacing\n c = self.config.num_train_timesteps // lcm_origin_steps\n lcm_origin_timesteps = np.asarray(list(range(1, lcm_origin_steps + 1))) * c - 1 # LCM Training Steps Schedule\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule\n\n self.timesteps = torch.from_numpy(timesteps.copy()).to(device)\n\n def get_scalings_for_boundary_condition_discrete(self, t):\n self.sigma_data = 0.5 # Default: 0.5\n\n # By dividing 0.1: This is almost a delta function at t=0.\n c_skip = self.sigma_data ** 2 / ((t / 0.1) ** 2 + self.sigma_data ** 2)\n c_out = ((t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data ** 2) ** 0.5)\n return c_skip, c_out\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timeindex: int,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n 
generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[LCMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n Args:\n model_output (`torch.FloatTensor`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n A current instance of a sample created by the diffusion process.\n eta (`float`):\n The weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`, defaults to `False`):\n If `True`, computes \"corrected\" `model_output` from the clipped predicted original sample. Necessary\n because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no\n clipping has happened, \"corrected\" `model_output` would coincide with the one provided as input and\n `use_clipped_model_output` has no effect.\n generator (`torch.Generator`, *optional*):\n A random number generator.\n variance_noise (`torch.FloatTensor`):\n Alternative to generating noise with `generator` by directly providing the noise for the variance\n itself. Useful for methods such as [`CycleDiffusion`].\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.\n Returns:\n [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:\n If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # 1. get previous step value\n prev_timeindex = timeindex + 1\n if prev_timeindex < len(self.timesteps):\n prev_timestep = self.timesteps[prev_timeindex]\n else:\n prev_timestep = timestep\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 3. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)\n\n # 4. Different Parameterization:\n parameterization = self.config.prediction_type\n\n if parameterization == \"epsilon\": # noise-prediction\n pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()\n\n elif parameterization == \"sample\": # x-prediction\n pred_x0 = model_output\n\n elif parameterization == \"v_prediction\": # v-prediction\n pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output\n\n # 4. Denoise model output using boundary conditions\n denoised = c_out * pred_x0 + c_skip * sample\n\n # 5. 
Sample z ~ N(0, I), For MultiStep Inference\n # Noise is not used for one-step sampling.\n if len(self.timesteps) > 1:\n noise = torch.randn(model_output.shape).to(model_output.device)\n prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise\n else:\n prev_sample = denoised\n\n if not return_dict:\n return (prev_sample, denoised)\n\n return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.IntTensor,\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as original_samples\n alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)\n timesteps = timesteps.to(original_samples.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(original_samples.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise\n return noisy_samples\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity\n def get_velocity(\n self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as sample\n alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)\n timesteps = timesteps.to(sample.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(sample.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample\n return velocity\n\n def __len__(self):\n return self.config.num_train_timesteps" } ]
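The LCMScheduler snippet above documents a multistep consistency sampling loop. As orientation, here is a minimal sketch of how its set_timesteps/step API fits together; `toy_denoiser` is a hypothetical stand-in for a real text-conditioned model and exists only for illustration.

import torch
from diffusion.lcm_scheduler import LCMScheduler

def toy_denoiser(x, t):
    # hypothetical epsilon predictor; a real model would be text-conditioned
    return torch.zeros_like(x)

scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
# 4 inference steps drawn from a 50-step LCM training schedule
scheduler.set_timesteps(num_inference_steps=4, lcm_origin_steps=50, device="cpu")

sample = torch.randn(1, 4, 32, 32)  # start from pure noise
for i, t in enumerate(scheduler.timesteps):
    eps = toy_denoiser(sample, t)
    # step() takes (model_output, timeindex, timestep, sample); with
    # return_dict=False it returns the pair (prev_sample, denoised)
    sample, denoised = scheduler.step(eps, i, t, sample, return_dict=False)
# after the last step, `denoised` holds the final x0 estimate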
import os
import sys
import types
import argparse
import datetime
import time
import warnings
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pathlib import Path
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.utils import DistributedType
from diffusers.models import AutoencoderKL
from torch.utils.data import RandomSampler
from mmcv.runner import LogBuffer
from copy import deepcopy
from tqdm import tqdm
from diffusion import IDDPM
from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint
from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_
from diffusion.data.builder import build_dataset, build_dataloader, set_data_root
from diffusion.model.builder import build_model
from diffusion.utils.logger import get_root_logger
from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow
from diffusion.utils.optimizer import build_optimizer, auto_scale_lr
from diffusion.utils.lr_scheduler import build_lr_scheduler
from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler
from diffusion.lcm_scheduler import LCMScheduler
from torchvision.utils import save_image
from accelerate import FullyShardedDataParallelPlugin
from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16,418
                                )
        synchronize()


def parse_args():
    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument("config", type=str, help="config")
    parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine")
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the dir to resume the training')
    parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training')
    parser.add_argument('--local-rank', type=int, default=-1)
    parser.add_argument('--local_rank', type=int, default=-1)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    config = read_config(args.config)
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        config.work_dir = args.work_dir
    if args.cloud:
        config.data_root = '/data/data'
    if args.resume_from is not None:
        config.load_from = None
        config.resume_from = dict(
            checkpoint=args.resume_from,
            load_ema=False,
            resume_optimizer=True,
            resume_lr_scheduler=True)
    if args.debug:
        config.log_interval = 1
        config.train_batch_size = 11
        config.valid_num = 100
        config.load_from = None

    os.umask(0o000)
    os.makedirs(config.work_dir, exist_ok=True)

    init_handler = InitProcessGroupKwargs()
    init_handler.timeout = datetime.timedelta(seconds=5400)  # change timeout to avoid a strange NCCL bug
    # Initialize accelerator and tensorboard logging
    if config.use_fsdp:
        init_train = 'FSDP'
        set_fsdp_env()
        fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),)
    else:
        init_train = 'DDP'
        fsdp_plugin = None

    even_batches = True
    if config.multi_scale:
        even_batches = False

    accelerator = Accelerator(
        mixed_precision=config.mixed_precision,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        log_with="tensorboard",
        project_dir=os.path.join(config.work_dir, "logs"),
        fsdp_plugin=fsdp_plugin,
        even_batches=even_batches,
        kwargs_handlers=[init_handler]
    )

    logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log'))

    config.seed = init_random_seed(config.get('seed', None))
    set_random_seed(config.seed)

    if accelerator.is_main_process:
        config.dump(os.path.join(config.work_dir, 'config.py'))

    logger.info(f"Config: \n{config.pretty_text}")
    logger.info(f"World_size: {get_world_size()}, seed: {config.seed}")
    logger.info(f"Initializing: {init_train} for training")
    image_size = config.image_size  # @param [256, 512]
    latent_size = int(image_size) // 8
    pred_sigma = getattr(config, 'pred_sigma', True)
    learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma
    model_kwargs = {"window_block_indexes": config.window_block_indexes, "window_size": config.window_size,
                    "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config': config,
                    'model_max_length': config.model_max_length}

    # build models
    train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True)
    model = build_model(config.model,
                        config.grad_checkpointing,
                        config.get('fp32_attention', False),
                        input_size=latent_size,
                        learn_sigma=learn_sigma,
                        pred_sigma=pred_sigma,
                        **model_kwargs).train()
    logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}")

    if config.load_from is not None:
        if args.load_from is not None:
            config.load_from = args.load_from
        missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False))
        logger.warning(f'Missing keys: {missing}')
        logger.warning(f'Unexpected keys: {unexpected}')

    model_ema = deepcopy(model).eval()
    model_teacher = deepcopy(model).eval()

    if not config.data.load_vae_feat:
        vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda()

    # prepare for FSDP clip grad norm calculation
    if accelerator.distributed_type == DistributedType.FSDP:
        for m in accelerator._models:
            m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m)

    # build dataloader
    set_data_root(config.data_root)
    dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type)
    if config.multi_scale:
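The crop ends just before the multi-scale sampler is constructed. Below is a sketch (not the gold next line) of how the AspectRatioBatchSampler from the context snippets is typically wired into a dataloader; the dummy dataset, ratio table, and batch size are illustrative assumptions.

from torch.utils.data import Dataset, DataLoader, RandomSampler
from diffusion.utils.data_sampler import AspectRatioBatchSampler

class DummyAspectDataset(Dataset):
    """Illustrative dataset exposing the get_data_info() hook the sampler needs."""
    sizes = [(512, 1024), (768, 768), (1024, 512)]

    def __len__(self):
        return 64

    def __getitem__(self, idx):
        return idx

    def get_data_info(self, idx):
        h, w = self.sizes[idx % len(self.sizes)]
        return {"height": h, "width": w}

aspect_ratios = {"0.5": [512, 1024], "1.0": [768, 768], "2.0": [1024, 512]}
ratio_nums = {"0.5": 22, "1.0": 21, "2.0": 21}  # assumed per-ratio sample counts

toy_dataset = DummyAspectDataset()
batch_sampler = AspectRatioBatchSampler(
    sampler=RandomSampler(toy_dataset),
    dataset=toy_dataset,
    batch_size=8,
    aspect_ratios=aspect_ratios,
    drop_last=True,
    ratio_nums=ratio_nums,
    valid_num=0,
)
# each yielded batch contains indices whose images share the closest aspect ratio
loader = DataLoader(toy_dataset, batch_sampler=batch_sampler)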
current_file_path = Path(__file__).resolve()
sys.path.insert(0, str(current_file_path.parent.parent))
warnings.filterwarnings("ignore")  # ignore warning


def set_fsdp_env():
    os.environ["ACCELERATE_USE_FSDP"] = 'true'
    os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP'
    os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE'
    os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock'


def ema_update(model_dest: nn.Module, model_src: nn.Module, rate):
    param_dict_src = dict(model_src.named_parameters())
    for p_name, p_dest in model_dest.named_parameters():
        p_src = param_dict_src[p_name]
        assert p_src is not p_dest
        p_dest.data.mul_(rate).add_((1 - rate) * p_src.data)


def append_dims(x, target_dims):
    """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
    dims_to_append = target_dims - x.ndim
    if dims_to_append < 0:
        raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
    return x[(...,) + (None,) * dims_to_append]


# From LCMScheduler.get_scalings_for_boundary_condition_discrete
def scalings_for_boundary_conditions(timestep, sigma_data=0.5, timestep_scaling=10.0):
    c_skip = sigma_data**2 / ((timestep / 0.1) ** 2 + sigma_data**2)
    c_out = (timestep / 0.1) / ((timestep / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out


def extract_into_tensor(a, t, x_shape):
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


class DDIMSolver:
    def __init__(self, alpha_cumprods, timesteps=1000, ddim_timesteps=50):
        # DDIM sampling parameters
        step_ratio = timesteps // ddim_timesteps
        self.ddim_timesteps = (np.arange(1, ddim_timesteps + 1) * step_ratio).round().astype(np.int64) - 1
        self.ddim_alpha_cumprods = alpha_cumprods[self.ddim_timesteps]
        self.ddim_alpha_cumprods_prev = np.asarray(
            [alpha_cumprods[0]] + alpha_cumprods[self.ddim_timesteps[:-1]].tolist()
        )
        # convert to torch tensors
        self.ddim_timesteps = torch.from_numpy(self.ddim_timesteps).long()
        self.ddim_alpha_cumprods = torch.from_numpy(self.ddim_alpha_cumprods)
        self.ddim_alpha_cumprods_prev = torch.from_numpy(self.ddim_alpha_cumprods_prev)

    def to(self, device):
        self.ddim_timesteps = self.ddim_timesteps.to(device)
        self.ddim_alpha_cumprods = self.ddim_alpha_cumprods.to(device)
        self.ddim_alpha_cumprods_prev = self.ddim_alpha_cumprods_prev.to(device)
        return self

    def ddim_step(self, pred_x0, pred_noise, timestep_index):
        alpha_cumprod_prev = extract_into_tensor(self.ddim_alpha_cumprods_prev, timestep_index, pred_x0.shape)
        dir_xt = (1.0 - alpha_cumprod_prev).sqrt() * pred_noise
        x_prev = alpha_cumprod_prev.sqrt() * pred_x0 + dir_xt
        return x_prev


@torch.no_grad()
def log_validation(model, step, device):
    if hasattr(model, 'module'):
        model = model.module
    scheduler = LCMScheduler(beta_start=0.0001, beta_end=0.02, beta_schedule="linear", prediction_type="epsilon")
    scheduler.set_timesteps(4, 50)
    infer_timesteps = scheduler.timesteps

    dog_embed = torch.load('data/tmp/dog.pth', map_location='cpu')
    caption_embs, emb_masks = dog_embed['dog_text'].to(device), dog_embed['dog_mask'].to(device)
    hw = torch.tensor([[1024, 1024]], dtype=torch.float, device=device).repeat(1, 1)
    ar = torch.tensor([[1.]], device=device).repeat(1, 1)

    # Create sampling noise:
    infer_latents = torch.randn(1, 4, 1024, 1024, device=device)
    model_kwargs = dict(data_info={'img_hw': hw, 'aspect_ratio': ar}, mask=emb_masks)

    logger.info("Running validation... ")
    # 7. LCM MultiStep Sampling Loop:
    for i, t in tqdm(list(enumerate(infer_timesteps))):
        ts = torch.full((1,), t, device=device, dtype=torch.long)

        # model prediction (v-prediction, eps, x)
        model_pred = model(infer_latents, ts, caption_embs, **model_kwargs)[:, :4]

        # compute the previous noisy sample x_t -> x_t-1
        infer_latents, denoised = scheduler.step(model_pred, i, t, infer_latents, return_dict=False)

    samples = vae.decode(denoised / 0.18215).sample
    torch.cuda.empty_cache()
    save_image(samples[0], f'output_cv/vis/{step}.jpg', nrow=1, normalize=True, value_range=(-1, 1))


def train():
    if config.get('debug_nan', False):
        DebugUnderflowOverflow(model)
        logger.info('NaN debugger registered. Start to detect overflow during training.')
    time_start, last_tic = time.time(), time.time()
    log_buffer = LogBuffer()

    start_step = start_epoch * len(train_dataloader)
    global_step = 0
    total_steps = len(train_dataloader) * config.num_epochs

    load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False)

    # Create uncond embeds for classifier free guidance
    uncond_prompt_embeds = model.module.y_embedder.y_embedding.repeat(config.train_batch_size, 1, 1, 1)

    # Now you train the model
    for epoch in range(start_epoch + 1, config.num_epochs + 1):
        data_time_start = time.time()
        data_time_all = 0
        for step, batch in enumerate(train_dataloader):
            data_time_all += time.time() - data_time_start
            if load_vae_feat:
                z = batch[0]
            else:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=config.mixed_precision == 'fp16'):
                        posterior = vae.encode(batch[0]).latent_dist
                        if config.sample_posterior:
                            z = posterior.sample()
                        else:
                            z = posterior.mode()
            latents = z * config.scale_factor

            y = batch[1]
            y_mask = batch[2]
            data_info = batch[3]

            # Sample a random timestep for each image
            grad_norm = None
            with accelerator.accumulate(model):
                # Predict the noise residual
                optimizer.zero_grad()

                # Sample noise that we'll add to the latents
                noise = torch.randn_like(latents)
                bsz = latents.shape[0]

                # Sample a random timestep for each image t_n ~ U[0, N - k - 1] without bias.
                topk = config.train_sampling_steps // config.num_ddim_timesteps
                index = torch.randint(0, config.num_ddim_timesteps, (bsz,), device=latents.device).long()
                start_timesteps = solver.ddim_timesteps[index]
                timesteps = start_timesteps - topk
                timesteps = torch.where(timesteps < 0, torch.zeros_like(timesteps), timesteps)

                # Get boundary scalings for start_timesteps and (end) timesteps.
                c_skip_start, c_out_start = scalings_for_boundary_conditions(start_timesteps)
                c_skip_start, c_out_start = [append_dims(x, latents.ndim) for x in [c_skip_start, c_out_start]]
                c_skip, c_out = scalings_for_boundary_conditions(timesteps)
                c_skip, c_out = [append_dims(x, latents.ndim) for x in [c_skip, c_out]]

                # Sample a random guidance scale w from U[w_min, w_max] and embed it
                # w = (config.w_max - config.w_min) * torch.rand((bsz,)) + config.w_min
                w = config.cfg_scale * torch.ones((bsz,))
                w = w.reshape(bsz, 1, 1, 1)
                w = w.to(device=latents.device, dtype=latents.dtype)

                # Get online LCM prediction on z_{t_{n + k}}, w, c, t_{n + k}
                _, pred_x_0, noisy_model_input = train_diffusion.training_losses(model, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise)
                model_pred = c_skip_start * noisy_model_input + c_out_start * pred_x_0

                # Use the ODE solver to predict the kth step in the augmented PF-ODE trajectory after
                # noisy_latents with both the conditioning embedding c and unconditional embedding 0
                # Get teacher model prediction on noisy_latents and conditional embedding
                with torch.no_grad():
                    with torch.autocast("cuda"):
                        cond_teacher_output, cond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), noise=noise)

                        # Get teacher model prediction on noisy_latents and unconditional embedding
                        uncond_teacher_output, uncond_pred_x0, _ = train_diffusion.training_losses(model_teacher, latents, start_timesteps, model_kwargs=dict(y=uncond_prompt_embeds, mask=y_mask, data_info=data_info), noise=noise)

                        # Perform "CFG" to get x_prev estimate (using the LCM paper's CFG formulation)
                        pred_x0 = cond_pred_x0 + w * (cond_pred_x0 - uncond_pred_x0)
                        pred_noise = cond_teacher_output + w * (cond_teacher_output - uncond_teacher_output)
                        x_prev = solver.ddim_step(pred_x0, pred_noise, index)

                # Get target LCM prediction on x_prev, w, c, t_n
                with torch.no_grad():
                    with torch.autocast("cuda", enabled=True):
                        _, pred_x_0, _ = train_diffusion.training_losses(model_ema, x_prev.float(), timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info), skip_noise=True)
                    target = c_skip * x_prev + c_out * pred_x_0

                # Calculate loss
                if config.loss_type == "l2":
                    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
                elif config.loss_type == "huber":
                    loss = torch.mean(torch.sqrt((model_pred.float() - target.float()) ** 2 + config.huber_c**2) - config.huber_c)

                # Backpropagation on the online student model (`model`)
                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad(set_to_none=True)

            if accelerator.sync_gradients:
                ema_update(model_ema, model, config.ema_decay)

            lr = lr_scheduler.get_last_lr()[0]
            logs = {"loss": accelerator.gather(loss).mean().item()}
            if grad_norm is not None:
                logs.update(grad_norm=accelerator.gather(grad_norm).mean().item())
            log_buffer.update(logs)
            if (step + 1) % config.log_interval == 0 or (step + 1) == 1:
                t = (time.time() - last_tic) / config.log_interval
                t_d = data_time_all / config.log_interval
                avg_time = (time.time() - time_start) / (global_step + 1)
                eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1))))
                eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1))))
                # avg_loss = sum(loss_buffer) / len(loss_buffer)
                log_buffer.average()
                info = f"Step/Epoch [{(epoch-1)*len(train_dataloader)+step+1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \
                       f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['resolution'][0][0].item()}, {data_info['resolution'][0][1].item()}), "
                info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()])
                logger.info(info)
                last_tic = time.time()
                log_buffer.clear()
                data_time_all = 0
            logs.update(lr=lr)
            accelerator.log(logs, step=global_step + start_step)

            global_step += 1
            data_time_start = time.time()
            synchronize()
            torch.cuda.empty_cache()
            if accelerator.is_main_process:
                # log_validation(model_ema, step, model.device)
                if ((epoch - 1) * len(train_dataloader) + step + 1) % config.save_model_steps == 0:
                    os.umask(0o000)
                    save_checkpoint(os.path.join(config.work_dir, 'checkpoints'),
                                    epoch=epoch,
                                    step=(epoch - 1) * len(train_dataloader) + step + 1,
                                    model=accelerator.unwrap_model(model),
                                    model_ema=accelerator.unwrap_model(model_ema),
                                    optimizer=optimizer,
                                    lr_scheduler=lr_scheduler
                                    )
            synchronize()

        synchronize()
        if accelerator.is_main_process:
            if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs:
                os.umask(0o000)
                save_checkpoint(os.path.join(config.work_dir, 'checkpoints'),
                                epoch=epoch,
                                step=(epoch - 1) * len(train_dataloader) + step + 1,
                                model=accelerator.unwrap_model(model),
                                model_ema=accelerator.unwrap_model(model_ema),
                                optimizer=optimizer,
                                lr_scheduler=lr_scheduler
                                )
        synchronize()


def parse_args():
    parser = argparse.ArgumentParser(description="Process some integers.")
    parser.add_argument("config", type=str, help="config")
    parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine")
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument('--resume-from', help='the dir to resume the training')
    parser.add_argument('--load-from', default=None, help='the dir to load a ckpt for training')
    parser.add_argument('--local-rank', type=int, default=-1)
    parser.add_argument('--local_rank', type=int, default=-1)
    parser.add_argument('--debug', action='store_true')
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    config = read_config(args.config)
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        config.work_dir = args.work_dir
    if args.cloud:
        config.data_root = '/data/data'
    if args.resume_from is not None:
        config.load_from = None
        config.resume_from = dict(
            checkpoint=args.resume_from,
            load_ema=False,
            resume_optimizer=True,
            resume_lr_scheduler=True)
    if args.debug:
        config.log_interval = 1
        config.train_batch_size = 11
        config.valid_num = 100
        config.load_from = None

    os.umask(0o000)
    os.makedirs(config.work_dir, exist_ok=True)

    init_handler = InitProcessGroupKwargs()
    init_handler.timeout = datetime.timedelta(seconds=5400)  # change timeout to avoid a strange NCCL bug
    # Initialize accelerator and tensorboard logging
    if config.use_fsdp:
        init_train = 'FSDP'
        set_fsdp_env()
        fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),)
    else:
        init_train = 'DDP'
        fsdp_plugin = None

    even_batches = True
    if config.multi_scale:
        even_batches = False

    accelerator = Accelerator(
        mixed_precision=config.mixed_precision,
        gradient_accumulation_steps=config.gradient_accumulation_steps,
        log_with="tensorboard",
        project_dir=os.path.join(config.work_dir, "logs"),
        fsdp_plugin=fsdp_plugin,
        even_batches=even_batches,
        kwargs_handlers=[init_handler]
    )

    logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log'))

    config.seed = init_random_seed(config.get('seed', None))
    set_random_seed(config.seed)

    if accelerator.is_main_process:
        config.dump(os.path.join(config.work_dir, 'config.py'))

    logger.info(f"Config: \n{config.pretty_text}")
    logger.info(f"World_size: {get_world_size()}, seed: {config.seed}")
    logger.info(f"Initializing: {init_train} for training")
    image_size = config.image_size  # @param [256, 512]
    latent_size = int(image_size) // 8
    pred_sigma = getattr(config, 'pred_sigma', True)
    learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma
    model_kwargs = {"window_block_indexes": config.window_block_indexes, "window_size": config.window_size,
                    "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config': config,
                    'model_max_length': config.model_max_length}

    # build models
    train_diffusion = IDDPM(str(config.train_sampling_steps), learn_sigma=learn_sigma, pred_sigma=pred_sigma, snr=config.snr_loss, return_startx=True)
    model = build_model(config.model,
                        config.grad_checkpointing,
                        config.get('fp32_attention', False),
                        input_size=latent_size,
                        learn_sigma=learn_sigma,
                        pred_sigma=pred_sigma,
                        **model_kwargs).train()
    logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}")

    if config.load_from is not None:
        if args.load_from is not None:
            config.load_from = args.load_from
        missing, unexpected = load_checkpoint(config.load_from, model, load_ema=config.get('load_ema', False))
        logger.warning(f'Missing keys: {missing}')
        logger.warning(f'Unexpected keys: {unexpected}')

    model_ema = deepcopy(model).eval()
    model_teacher = deepcopy(model).eval()

    if not config.data.load_vae_feat:
        vae = AutoencoderKL.from_pretrained(config.vae_pretrained).cuda()

    # prepare for FSDP clip grad norm calculation
    if accelerator.distributed_type == DistributedType.FSDP:
        for m in accelerator._models:
            m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m)

    # build dataloader
    set_data_root(config.data_root)
    dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type)
    if config.multi_scale:
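Two pieces of the training step above are compact enough to check numerically: the EMA update applied to model_ema after each synchronized gradient step, and the pseudo-Huber loss used when config.loss_type == "huber". A self-contained sketch with illustrative tensors and decay values:

import torch
import torch.nn as nn

def ema_update_demo(model_dest: nn.Module, model_src: nn.Module, rate: float):
    # p_dest <- rate * p_dest + (1 - rate) * p_src, mirroring ema_update() above
    src = dict(model_src.named_parameters())
    for name, p in model_dest.named_parameters():
        p.data.mul_(rate).add_((1 - rate) * src[name].data)

student, ema = nn.Linear(4, 4), nn.Linear(4, 4)
ema_update_demo(ema, student, rate=0.95)

pred, target, huber_c = torch.randn(8), torch.randn(8), 0.001
# pseudo-Huber: behaves like L2 for small residuals and like L1 for large ones,
# while staying smooth everywhere
loss = torch.mean(torch.sqrt((pred - target) ** 2 + huber_c ** 2) - huber_c)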
batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset,
18
2023-10-12 14:16:33+00:00
24k
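The record above leans on DDIMSolver.ddim_step to jump the teacher's PF-ODE estimate backward: x_prev = sqrt(alpha_prev) * x0_hat + sqrt(1 - alpha_prev) * eps_hat. A standalone sketch of that arithmetic, using an illustrative linear-beta alpha schedule rather than the script's trained one:

import numpy as np
import torch

alphas = 1.0 - np.linspace(1e-4, 2e-2, 1000)   # toy beta schedule
alpha_cumprods = np.cumprod(alphas)

step_ratio = 1000 // 50  # timesteps // ddim_timesteps, as in DDIMSolver
ddim_timesteps = (np.arange(1, 51) * step_ratio).round().astype(np.int64) - 1
ddim_alpha_prev = torch.from_numpy(
    np.asarray([alpha_cumprods[0]] + alpha_cumprods[ddim_timesteps[:-1]].tolist())
)

pred_x0 = torch.randn(2, 4, 8, 8)
pred_noise = torch.randn(2, 4, 8, 8)
index = torch.tensor([3, 17])  # per-sample DDIM step indices

# gather the previous alpha_cumprod per sample and broadcast over CHW,
# matching extract_into_tensor() in the script
a_prev = ddim_alpha_prev[index].reshape(-1, 1, 1, 1).to(pred_x0.dtype)
x_prev = a_prev.sqrt() * pred_x0 + (1.0 - a_prev).sqrt() * pred_noise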
NVlabs/EmerNeRF
train_emernerf.py
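The EmerNeRF context that follows normalizes world-space query points into the unit cube before hash encoding (see DensityField.forward and RadianceField.contract_points below). A minimal sketch of the bounded-AABB branch; the box extents here are illustrative:

import torch

aabb = torch.tensor([-20.0, -20.0, -4.0, 20.0, 20.0, 12.0])  # [min_xyz, max_xyz]
positions = torch.randn(5, 3) * 15.0  # world-space samples

aabb_min, aabb_max = torch.split(aabb, 3, dim=-1)
normed = (positions - aabb_min) / (aabb_max - aabb_min)
# zero out points that fall outside the box so the hash encoder only sees [0, 1]
selector = ((normed > 0.0) & (normed < 1.0)).all(dim=-1).to(positions)
normed = normed * selector.unsqueeze(-1)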
[ { "identifier": "metrics", "path": "datasets/metrics.py", "snippet": "def compute_valid_depth_rmse(prediction: Tensor, target: Tensor) -> float:\ndef compute_psnr(prediction: Tensor, target: Tensor) -> float:\ndef compute_ssim(\n prediction: Union[Tensor, np.ndarray], target: Union[Tensor, np.ndarray]\n) -> float:\ndef compute_scene_flow_metrics(pred: Tensor, labels: Tensor):\ndef knn_predict(\n queries: Tensor,\n memory_bank: Tensor,\n memory_labels: Tensor,\n n_classes: int,\n knn_k: int = 1,\n knn_t: float = 0.1,\n) -> Tensor:\ndef knn_predict(\n queries: Tensor,\n memory_bank: Tensor,\n memory_labels: Tensor,\n n_classes: int,\n knn_k: int = 1,\n knn_t: float = 0.1,\n similarity: str = \"cosine\",\n) -> Tensor:\ndef collect_centroids(\n train_indices: List[int],\n dataset, # a WaymoDataset object\n model: RadianceField,\n device: torch.device,\n):\ndef eval_few_shot_occ(\n test_indices: List[int],\n dataset, # a WaymoDataset object\n model: RadianceField,\n device: torch.device,\n centroids_bank: Tensor,\n label_bank: Tensor,\n):\n EPE3D = torch.mean(l2_norm).item() # Mean absolute distance error" }, { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # training and testing indices are indices into the full dataset\n # train_indices are img indices, so the length is num_cams * num_timesteps\n train_indices: List[int] = None\n test_indices: List[int] = None\n # train_timesteps are timesteps, so the length is num_timesteps (len(unique_timesteps))\n train_timesteps: Tensor = None\n test_timesteps: Tensor = None\n\n # dataset wrappers\n # full: includes all data\n full_pixel_set: SplitWrapper = None\n full_lidar_set: SplitWrapper = None\n # train: includes only training data\n train_pixel_set: SplitWrapper = None\n train_lidar_set: SplitWrapper = None\n # test: includes only testing data\n test_pixel_set: SplitWrapper = None\n test_lidar_set: SplitWrapper = None\n\n def __init__(\n self,\n data_config: OmegaConf,\n ):\n super().__init__()\n self.data_cfg = data_config\n\n @abc.abstractmethod\n def build_data_source(self):\n \"\"\"\n Create the data source for the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def build_split_wrapper(self):\n \"\"\"\n Makes each data source as a Pytorch Dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def split_train_test(self):\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n if self.lidar_source is not None:\n aabb = self.lidar_source.get_aabb()\n else:\n aabb = self.pixel_source.get_aabb()\n return aabb\n\n @property\n def num_cams(self) -> int:\n return self.pixel_source.num_cams\n\n @property\n def scene_idx(self) -> int:\n return self.data_cfg.scene_idx\n\n @property\n def num_img_timesteps(self) -> int:\n return self.pixel_source.num_timesteps\n\n @property\n def num_lidar_timesteps(self) -> int:\n if self.lidar_source is None:\n logger.warning(\"No lidar source, returning num_img_timesteps\")\n return self.num_img_timesteps\n return self.lidar_source.num_timesteps\n\n @property\n def num_train_timesteps(self) -> int:\n return len(self.train_timesteps)\n\n @property\n def num_test_timesteps(self) -> int:\n return len(self.test_timesteps)\n\n @property\n def unique_normalized_training_timestamps(self) -> Tensor:\n return 
self.pixel_source.unique_normalized_timestamps[self.train_timesteps]\n\n @property\n def device(self):\n return self.data_cfg.preload_device" }, { "identifier": "DensityField", "path": "radiance_fields/radiance_field.py", "snippet": "class DensityField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n aabb: Union[Tensor, List[float]] = [[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = False,\n base_mlp_layer_width: int = 64,\n ) -> None:\n super().__init__()\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.num_dims = num_dims\n self.density_activation = density_activation\n self.unbounded = unbounded\n self.xyz_encoder = xyz_encoder\n\n # density head\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n )\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set propnet aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def forward(\n self, positions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1).to(positions)\n positions = positions * selector.unsqueeze(-1)\n xyz_encoding = self.xyz_encoder(positions.view(-1, self.num_dims))\n density_before_activation = self.base_mlp(xyz_encoding).view(\n list(positions.shape[:-1]) + [-1]\n )\n density = self.density_activation(density_before_activation)\n return {\"density\": density}" }, { "identifier": "RadianceField", "path": "radiance_fields/radiance_field.py", "snippet": "class RadianceField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n dynamic_xyz_encoder: Optional[HashEncoder] = None,\n flow_xyz_encoder: Optional[HashEncoder] = None,\n aabb: Union[Tensor, List[float]] = [-1, -1, -1, 1, 1, 1],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = True,\n geometry_feature_dim: int = 15,\n base_mlp_layer_width: int = 64,\n head_mlp_layer_width: int = 64,\n enable_cam_embedding: bool = False,\n enable_img_embedding: bool = False,\n num_cams: int = 3,\n appearance_embedding_dim: int = 16,\n semantic_feature_dim: int = 64,\n feature_mlp_layer_width: int = 256,\n feature_embedding_dim: int = 768,\n enable_sky_head: bool = False,\n enable_shadow_head: bool = False,\n enable_feature_head: bool = False,\n num_train_timesteps: int = 0,\n interpolate_xyz_encoding: bool = False,\n enable_learnable_pe: bool = True,\n enable_temporal_interpolation: bool = False,\n ) -> None:\n super().__init__()\n # scene properties\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.unbounded = unbounded\n self.num_cams = num_cams\n self.num_dims = num_dims\n self.density_activation = density_activation\n\n # appearance embedding\n self.enable_cam_embedding = 
enable_cam_embedding\n self.enable_img_embedding = enable_img_embedding\n self.appearance_embedding_dim = appearance_embedding_dim\n\n self.geometry_feature_dim = geometry_feature_dim\n # add semantic feature dim if feature head is enabled\n if not enable_feature_head:\n semantic_feature_dim = 0\n self.semantic_feature_dim = semantic_feature_dim\n\n # note: we use very conservative default values for mlps\n # usually you want to use larger ones\n\n # ======== Static Field ======== #\n self.xyz_encoder = xyz_encoder\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width, geometry_feature_dim + semantic_feature_dim\n ),\n )\n\n # ======== Dynamic Field ======== #\n self.interpolate_xyz_encoding = interpolate_xyz_encoding\n self.dynamic_xyz_encoder = dynamic_xyz_encoder\n self.enable_temporal_interpolation = enable_temporal_interpolation\n if self.dynamic_xyz_encoder is not None:\n # for temporal interpolation\n self.register_buffer(\"training_timesteps\", torch.zeros(num_train_timesteps))\n self.dynamic_base_mlp = nn.Sequential(\n nn.Linear(self.dynamic_xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width,\n geometry_feature_dim + semantic_feature_dim,\n ),\n )\n\n # ======== Flow Field ======== #\n self.flow_xyz_encoder = flow_xyz_encoder\n if self.flow_xyz_encoder is not None:\n self.flow_mlp = nn.Sequential(\n nn.Linear(\n self.flow_xyz_encoder.n_output_dims,\n base_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 6), # 3 for forward, 3 for backward\n # no activation function for flow\n )\n\n # appearance embedding\n if self.enable_cam_embedding:\n # per-camera embedding\n self.appearance_embedding = nn.Embedding(num_cams, appearance_embedding_dim)\n elif self.enable_img_embedding:\n # per-image embedding\n self.appearance_embedding = nn.Embedding(\n num_train_timesteps * num_cams, appearance_embedding_dim\n )\n else:\n self.appearance_embedding = None\n\n # direction encoding\n self.direction_encoding = SinusoidalEncoder(\n n_input_dims=3, min_deg=0, max_deg=4\n )\n\n # ======== Color Head ======== #\n self.rgb_head = MLP(\n in_dims=geometry_feature_dim\n + self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0 # 2 or 0?\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n\n # ======== Shadow Head ======== #\n self.enable_shadow_head = enable_shadow_head\n if self.enable_shadow_head:\n self.shadow_head = nn.Sequential(\n nn.Linear(geometry_feature_dim, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n nn.Sigmoid(),\n )\n\n # ======== Sky Head ======== #\n self.enable_sky_head = enable_sky_head\n if self.enable_sky_head:\n self.sky_head = MLP(\n in_dims=self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n if enable_feature_head:\n # feature sky head\n self.dino_sky_head = nn.Sequential(\n # TODO: remove appearance embedding from dino sky head\n nn.Linear(\n self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n 
feature_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n\n # ======== Feature Head ======== #\n self.enable_feature_head = enable_feature_head\n if self.enable_feature_head:\n self.dino_head = nn.Sequential(\n nn.Linear(semantic_feature_dim, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n # placeholders for visualization, will be registered when available\n self.register_buffer(\n \"feats_reduction_mat\", torch.zeros(feature_embedding_dim, 3)\n )\n self.register_buffer(\"feat_color_min\", torch.zeros(3, dtype=torch.float32))\n self.register_buffer(\"feat_color_max\", torch.ones(3, dtype=torch.float32))\n\n # positional embedding (PE) decomposition\n self.enable_learnable_pe = enable_learnable_pe\n if self.enable_learnable_pe:\n # globally-shared low-resolution learnable PE map\n self.learnable_pe_map = nn.Parameter(\n 0.05 * torch.randn(1, feature_embedding_dim // 2, 80, 120),\n requires_grad=True,\n )\n # a PE head to decode PE features\n self.pe_head = nn.Sequential(\n nn.Linear(feature_embedding_dim // 2, feature_embedding_dim),\n )\n\n def register_normalized_training_timesteps(\n self, normalized_timesteps: Tensor, time_diff: float = None\n ) -> None:\n \"\"\"\n register normalized timesteps for temporal interpolation\n\n Args:\n normalized_timesteps (Tensor): normalized timesteps in [0, 1]\n time_diff (float, optional): time difference between two consecutive timesteps. Defaults to None.\n \"\"\"\n if self.dynamic_xyz_encoder is not None:\n # register timesteps for temporal interpolation\n self.training_timesteps.copy_(normalized_timesteps)\n self.training_timesteps = self.training_timesteps.to(self.device)\n if time_diff is not None:\n # use the provided time difference if available\n self.time_diff = time_diff\n else:\n if len(self.training_timesteps) > 1:\n # otherwise, compute the time difference from the provided timesteps\n # it's important to make sure the provided timesteps are consecutive\n self.time_diff = (\n self.training_timesteps[1] - self.training_timesteps[0]\n )\n else:\n self.time_diff = 0\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n \"\"\"\n register aabb for scene space\n \"\"\"\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def register_feats_reduction_mat(\n self,\n feats_reduction_mat: Tensor,\n feat_color_min: Tensor,\n feat_color_max: Tensor,\n ) -> None:\n \"\"\"\n A placeholder for registering the PCA reduction matrix and min/max values for visualization.\n You may not want to compute PCA reduction matrix every time from the dataset.\n \"\"\"\n # for visualization\n self.feats_reduction_mat.copy_(feats_reduction_mat)\n self.feat_color_min.copy_(feat_color_min)\n self.feat_color_max.copy_(feat_color_max)\n self.feats_reduction_mat = self.feats_reduction_mat.to(self.device)\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def contract_points(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n contract [-inf, inf] points to the range [0, 1] for hash encoding\n\n 
Returns:\n            normed_positions: [..., 3] in [0, 1]\n        \"\"\"\n        if self.unbounded:\n            # use the infinity norm to contract the positions for a cuboid aabb\n            # (a standalone sketch of this contraction appears after the context list)\n            normed_positions = contract(positions, self.aabb, ord=float(\"inf\"))\n        else:\n            aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n            normed_positions = (positions - aabb_min) / (aabb_max - aabb_min)\n            selector = (\n                ((normed_positions > 0.0) & (normed_positions < 1.0))\n                .all(dim=-1)\n                .to(positions)\n            )\n            normed_positions = normed_positions * selector.unsqueeze(-1)\n        return normed_positions\n\n    def forward_static_hash(\n        self,\n        positions: Tensor,\n    ) -> Tensor:\n        \"\"\"\n        forward pass for static hash encoding\n\n        Returns:\n            encoded_features: [..., geometry_feature_dim + (semantic_feature_dim)]\n            normed_positions: [..., 3] in [0, 1]\n        \"\"\"\n        normed_positions = self.contract_points(positions)\n        xyz_encoding = self.xyz_encoder(normed_positions.view(-1, self.num_dims))\n        encoded_features = self.base_mlp(xyz_encoding).view(\n            list(normed_positions.shape[:-1]) + [-1]\n        )\n        return encoded_features, normed_positions\n\n    def forward_dynamic_hash(\n        self,\n        normed_positions: Tensor,\n        normed_timestamps: Tensor,\n        return_hash_encodings: bool = False,\n    ) -> Union[Tuple[Tensor, Tensor], Tensor]:\n        \"\"\"\n        forward pass for dynamic hash encoding\n\n        Returns:\n            encoded_dynamic_feats: [..., geometry_feature_dim + (semantic_feature_dim)]\n            dynamic_xyz_encoding: [..., n_output_dims] (optional)\n        \"\"\"\n        if normed_timestamps.shape[-1] != 1:\n            normed_timestamps = normed_timestamps.unsqueeze(-1)\n        # TODO: temporal interpolation is currently disabled; the intended\n        # condition is kept below for reference. Note that the interpolation\n        # branch does not produce dynamic_xyz_encoding, so it cannot be\n        # combined with return_hash_encodings=True.\n        # if self.training or not self.enable_temporal_interpolation:\n        if True:\n            temporal_positions = torch.cat(\n                [normed_positions, normed_timestamps], dim=-1\n            )\n            dynamic_xyz_encoding = self.dynamic_xyz_encoder(\n                temporal_positions.view(-1, self.num_dims + 1)\n            ).view(list(temporal_positions.shape[:-1]) + [-1])\n            encoded_dynamic_feats = self.dynamic_base_mlp(dynamic_xyz_encoding)\n        else:\n            encoded_dynamic_feats = temporal_interpolation(\n                normed_timestamps,\n                self.training_timesteps,\n                normed_positions,\n                self.dynamic_xyz_encoder,\n                self.dynamic_base_mlp,\n                interpolate_xyz_encoding=self.interpolate_xyz_encoding,\n            )\n        if return_hash_encodings:\n            return encoded_dynamic_feats, dynamic_xyz_encoding\n        else:\n            return encoded_dynamic_feats\n\n    def forward_flow_hash(\n        self,\n        normed_positions: Tensor,\n        normed_timestamps: Tensor,\n    ) -> Tuple[Tensor, Tensor]:\n        \"\"\"\n        forward pass for flow hash encoding\n\n        Returns:\n            flow: [..., 6] (forward_flow, backward_flow)\n        \"\"\"\n        if normed_timestamps.shape[-1] != 1:\n            normed_timestamps = normed_timestamps.unsqueeze(-1)\n        if self.training or not self.enable_temporal_interpolation:\n            temporal_positions = torch.cat(\n                [normed_positions, normed_timestamps], dim=-1\n            )\n            flow_xyz_encoding = self.flow_xyz_encoder(\n                temporal_positions.view(-1, self.num_dims + 1)\n            ).view(list(temporal_positions.shape[:-1]) + [-1])\n            flow = self.flow_mlp(flow_xyz_encoding)\n        else:\n            flow = temporal_interpolation(\n                normed_timestamps,\n                self.training_timesteps,\n                normed_positions,\n                self.flow_xyz_encoder,\n                self.flow_mlp,\n                interpolate_xyz_encoding=True,\n            )\n        return flow\n\n    def forward(\n        self,\n        positions: Tensor,\n        directions: Tensor = None,\n        data_dict: Dict[str, Tensor] = {},\n        return_density_only: bool = False,\n        combine_static_dynamic: bool = False,\n        query_feature_head: bool = True,\n        query_pe_head: bool = True,\n    ) -> Dict[str, Tensor]:\n        \"\"\"\n        Args:\n            positions: [..., 3]\n            directions: [..., 3]\n            data_dict: a dictionary 
containing additional data\n return_density_only: if True, only return density without querying other heads\n combine_static_dynamic: if True, combine static and dynamic predictions based on static and dynamic density\n in addition to returning separate results for static and dynamic fields\n query_feature_head: if True, query feature head\n query_pe_head: if True, query PE head. Disable this if we want to directly query 3D features.\n Returns:\n results_dict: a dictionary containing everything\n \"\"\"\n results_dict = {}\n # forward static branch\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n\n has_timestamps = (\n \"normed_timestamps\" in data_dict or \"lidar_normed_timestamps\" in data_dict\n )\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n # forward dynamic branch\n if \"normed_timestamps\" in data_dict:\n normed_timestamps = data_dict[\"normed_timestamps\"]\n elif \"lidar_normed_timestamps\" in data_dict:\n # we use `lidar_` prefix as an identifier to skip querying other heads\n normed_timestamps = data_dict[\"lidar_normed_timestamps\"]\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow, backward_flow = flow[..., :3], flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n # overwrite dynamic feats using temporal aggregation results\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n # to be studied\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n # blend static and dynamic density to get the final density\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n }\n )\n if return_density_only:\n # skip querying other heads\n return results_dict\n\n if directions is not None:\n rgb_results = self.query_rgb(\n directions, geo_feats, dynamic_geo_feats, data_dict=data_dict\n )\n results_dict[\"dynamic_rgb\"] = rgb_results[\"dynamic_rgb\"]\n results_dict[\"static_rgb\"] = rgb_results[\"rgb\"]\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"rgb\"] = (\n static_ratio[..., None] * results_dict[\"static_rgb\"]\n + dynamic_ratio[..., None] * results_dict[\"dynamic_rgb\"]\n )\n if self.enable_shadow_head:\n shadow_ratio = self.shadow_head(dynamic_geo_feats)\n results_dict[\"shadow_ratio\"] = shadow_ratio\n if combine_static_dynamic and \"rgb\" in results_dict:\n results_dict[\"rgb\"] = (\n static_ratio[..., None]\n * results_dict[\"rgb\"]\n * (1 - shadow_ratio)\n + dynamic_ratio[..., None] * 
results_dict[\"dynamic_rgb\"]\n )\n else:\n # if no dynamic branch, use static density\n results_dict[\"density\"] = static_density\n if return_density_only:\n # skip querying other heads\n return results_dict\n if directions is not None:\n rgb_results = self.query_rgb(directions, geo_feats, data_dict=data_dict)\n results_dict[\"rgb\"] = rgb_results[\"rgb\"]\n\n if self.enable_feature_head and query_feature_head:\n if self.enable_learnable_pe and query_pe_head:\n learnable_pe_map = (\n F.grid_sample(\n self.learnable_pe_map,\n # assume pixel coords have been normalize to [-1, 1]\n data_dict[\"pixel_coords\"].reshape(1, 1, -1, 2) * 2 - 1,\n align_corners=False, # didn't test with True\n mode=\"bilinear\", # didn't test with other modes\n )\n .squeeze(2)\n .squeeze(0)\n .permute(1, 0)\n )\n dino_pe = self.pe_head(learnable_pe_map)\n results_dict[\"dino_pe\"] = dino_pe\n dino_feats = self.dino_head(semantic_feats)\n\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"dino_feat\"] = (\n static_ratio[..., None] * dino_feats\n + dynamic_ratio[..., None] * dynamic_dino_feats\n )\n else:\n results_dict[\"dino_feat\"] = dino_feats\n\n # query sky if not in lidar mode\n if (\n self.enable_sky_head\n and \"lidar_origin\" not in data_dict\n and directions is not None\n ):\n directions = directions[:, 0]\n reduced_data_dict = {k: v[:, 0] for k, v in data_dict.items()}\n sky_results = self.query_sky(directions, data_dict=reduced_data_dict)\n results_dict.update(sky_results)\n\n return results_dict\n\n def temporal_aggregation(\n self,\n positions: Tensor, # current world coordinates\n normed_timestamps: Tensor, # current normalized timestamps\n forward_flow: Tensor,\n backward_flow: Tensor,\n dynamic_feats: Tensor,\n ) -> Tensor:\n \"\"\"\n temporal aggregation for dynamic features\n Eq. 
(8) in the emernerf paper\n        \"\"\"\n        if normed_timestamps.shape[-1] != 1:\n            normed_timestamps = normed_timestamps.unsqueeze(-1)\n        if self.training:\n            noise = torch.rand_like(forward_flow)[..., 0:1]\n        else:\n            noise = torch.ones_like(forward_flow)[..., 0:1]\n        # forward and backward warped positions\n        forward_warped_positions = self.contract_points(\n            positions + forward_flow * noise\n        )\n        backward_warped_positions = self.contract_points(\n            positions + backward_flow * noise\n        )\n        # forward and backward warped timestamps\n        forward_warped_time = torch.clamp(\n            normed_timestamps + self.time_diff * noise, 0, 1.0\n        )\n        backward_warped_time = torch.clamp(\n            normed_timestamps - self.time_diff * noise, 0, 1.0\n        )\n        (\n            forward_dynamic_feats,\n            forward_dynamic_hash_encodings,\n        ) = self.forward_dynamic_hash(\n            forward_warped_positions,\n            forward_warped_time,\n            return_hash_encodings=True,\n        )\n        (\n            backward_dynamic_feats,\n            backward_dynamic_hash_encodings,\n        ) = self.forward_dynamic_hash(\n            backward_warped_positions,\n            backward_warped_time,\n            return_hash_encodings=True,\n        )\n        forward_pred_flow = self.forward_flow_hash(\n            forward_warped_positions,\n            forward_warped_time,\n        )\n        backward_pred_flow = self.forward_flow_hash(\n            backward_warped_positions,\n            backward_warped_time,\n        )\n        # simple weighted sum: the current features get weight 0.5 and each\n        # warped copy gets 0.25 (see the sanity check after the context list)\n        aggregated_dynamic_feats = (\n            dynamic_feats + 0.5 * forward_dynamic_feats + 0.5 * backward_dynamic_feats\n        ) / 2.0\n        return {\n            \"dynamic_feats\": aggregated_dynamic_feats,\n            \"forward_pred_backward_flow\": forward_pred_flow[..., 3:],\n            \"backward_pred_forward_flow\": backward_pred_flow[..., :3],\n            # to be studied\n            \"forward_dynamic_hash_encodings\": forward_dynamic_hash_encodings,\n            \"backward_dynamic_hash_encodings\": backward_dynamic_hash_encodings,\n        }\n\n    def query_rgb(\n        self,\n        directions: Tensor,\n        geo_feats: Tensor,\n        dynamic_geo_feats: Tensor = None,\n        data_dict: Dict[str, Tensor] = None,\n    ) -> Tensor:\n        directions = (directions + 1.0) / 2.0  # map view directions from [-1, 1] to [0, 1] before encoding\n        h = self.direction_encoding(directions.reshape(-1, directions.shape[-1])).view(\n            *directions.shape[:-1], -1\n        )\n        if self.enable_cam_embedding or self.enable_img_embedding:\n            if \"cam_idx\" in data_dict and self.enable_cam_embedding:\n                appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n            elif \"img_idx\" in data_dict and self.enable_img_embedding:\n                appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n            else:\n                # use mean appearance embedding\n                appearance_embedding = torch.ones(\n                    (*directions.shape[:-1], self.appearance_embedding_dim),\n                    device=directions.device,\n                ) * self.appearance_embedding.weight.mean(dim=0)\n            h = torch.cat([h, appearance_embedding], dim=-1)\n\n        rgb = self.rgb_head(torch.cat([h, geo_feats], dim=-1))\n        rgb = F.sigmoid(rgb)\n        results = {\"rgb\": rgb}\n\n        if self.dynamic_xyz_encoder is not None:\n            assert (\n                dynamic_geo_feats is not None\n            ), \"Dynamic geometry features are not provided.\"\n            dynamic_rgb = self.rgb_head(torch.cat([h, dynamic_geo_feats], dim=-1))\n            dynamic_rgb = F.sigmoid(dynamic_rgb)\n            results[\"dynamic_rgb\"] = dynamic_rgb\n        return results\n\n    def query_sky(\n        self, directions: Tensor, data_dict: Dict[str, Tensor] = None\n    ) -> Dict[str, Tensor]:\n        if len(directions.shape) == 2:\n            dd = self.direction_encoding(directions).to(directions)\n        else:\n            dd = self.direction_encoding(directions[:, 0]).to(directions)\n        if self.enable_cam_embedding or self.enable_img_embedding:\n            # optionally add appearance embedding\n            if 
\"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n dd = torch.cat([dd, appearance_embedding], dim=-1)\n rgb_sky = self.sky_head(dd).to(directions)\n rgb_sky = F.sigmoid(rgb_sky)\n results = {\"rgb_sky\": rgb_sky}\n if self.enable_feature_head:\n self.dino_sky_head(dd).to(directions)\n results[\"dino_sky_feat\"] = self.dino_sky_head(dd).to(directions)\n return results\n\n def query_flow(\n self, positions: Tensor, normed_timestamps: Tensor, query_density: bool = True\n ) -> Dict[str, Tensor]:\n \"\"\"\n query flow field\n \"\"\"\n normed_positions = self.contract_points(positions)\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n results = {\n \"forward_flow\": flow[..., :3],\n \"backward_flow\": flow[..., 3:],\n }\n if query_density:\n # it's important to filter valid flows based on a dynamic density threshold.\n # flows are valid only if they are on dynamic points.\n dynamic_feats = self.forward_dynamic_hash(\n normed_positions, normed_timestamps\n )\n (dynamic_geo_feats, _,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n results[\"dynamic_density\"] = dynamic_density\n return results\n\n def query_attributes(\n self,\n positions: Tensor,\n normed_timestamps: Tensor = None,\n query_feature_head: bool = True,\n ):\n \"\"\"\n query attributes (density, dino features, etc.)\n \"\"\"\n results_dict = {}\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow = flow[..., :3]\n backward_flow = flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n # \"occupancy\": occupancy,\n }\n )\n else:\n results_dict[\"density\"] = static_density\n if self.enable_feature_head and 
query_feature_head:\n # query on demand\n dino_feats = self.dino_head(semantic_feats)\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n results_dict[\"dino_feat\"] = (\n static_density.unsqueeze(-1) * dino_feats\n + dynamic_density.unsqueeze(-1) * dynamic_dino_feats\n ) / (density.unsqueeze(-1) + 1e-6)\n else:\n results_dict[\"dino_feat\"] = dino_feats\n return results_dict" }, { "identifier": "render_rays", "path": "radiance_fields/render_utils.py", "snippet": "def render_rays(\n # scene\n radiance_field: RadianceField = None,\n proposal_estimator: PropNetEstimator = None,\n proposal_networks: Optional[List[DensityField]] = None,\n data_dict: Dict[str, Tensor] = None,\n cfg: OmegaConf = None,\n proposal_requires_grad: bool = False,\n return_decomposition: bool = False,\n prefix=\"\",\n) -> Dict[str, Tensor]:\n \"\"\"Render some attributes of the scene along the rays.\"\"\"\n # reshape data_dict to be (num_rays, ...)\n rays_shape = data_dict[prefix + \"origins\"].shape\n if len(rays_shape) == 3:\n height, width, _ = rays_shape\n num_rays = height * width\n reshaped_data_dict = {}\n for k, v in data_dict.items():\n reshaped_data_dict[k] = v.reshape(num_rays, -1).squeeze()\n else:\n num_rays, _ = rays_shape\n reshaped_data_dict = data_dict.copy()\n\n def prop_sigma_fn(t_starts, t_ends, proposal_network):\n # query propsal networks for density\n t_origins = chunk_data_dict[prefix + \"origins\"][..., None, :]\n t_dirs = chunk_data_dict[prefix + \"viewdirs\"][..., None, :]\n positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0\n sub_dict = {\n k: v[..., None].repeat_interleave(t_starts.shape[-1], dim=-1)\n for k, v in chunk_data_dict.items()\n if \"time\" in k\n }\n return proposal_network(positions, sub_dict)\n\n def query_fn(t_starts, t_ends):\n # query the final nerf model for density and other information along the rays\n t_origins = chunk_data_dict[prefix + \"origins\"][..., None, :]\n t_dirs = chunk_data_dict[prefix + \"viewdirs\"][..., None, :].repeat_interleave(\n t_starts.shape[-1], dim=-2\n )\n sub_dict = {\n k: v[..., None].repeat_interleave(t_starts.shape[-1], dim=-1)\n for k, v in chunk_data_dict.items()\n if k not in [prefix + \"viewdirs\", prefix + \"origins\", \"pixel_coords\"]\n }\n sub_dict[\"t_starts\"], sub_dict[\"t_ends\"] = t_starts, t_ends\n if \"pixel_coords\" in chunk_data_dict:\n # use this for positional embedding decomposition\n sub_dict[\"pixel_coords\"] = chunk_data_dict[\"pixel_coords\"]\n positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0\n # return density only when rendering lidar, i.e., no rgb or sky or features are rendered\n results_dict: Dict[str, Tensor] = radiance_field(\n positions, t_dirs, sub_dict, return_density_only=(prefix == \"lidar_\")\n )\n results_dict[\"density\"] = results_dict[\"density\"].squeeze(-1)\n return results_dict\n\n results = []\n chunk = 2**24 if radiance_field.training else cfg.render.render_chunk_size\n for i in range(0, num_rays, chunk):\n chunk_data_dict = {k: v[i : i + chunk] for k, v in reshaped_data_dict.items()}\n assert proposal_networks is not None, \"proposal_networks is required.\"\n # obtain proposed intervals\n t_starts, t_ends = proposal_estimator.sampling(\n prop_sigma_fns=[\n lambda *args: prop_sigma_fn(*args, p) for p in proposal_networks\n ],\n 
num_samples=cfg.nerf.sampling.num_samples,\n prop_samples=cfg.nerf.propnet.num_samples_per_prop,\n n_rays=chunk_data_dict[prefix + \"origins\"].shape[0],\n near_plane=cfg.nerf.propnet.near_plane,\n far_plane=cfg.nerf.propnet.far_plane,\n sampling_type=cfg.nerf.propnet.sampling_type,\n stratified=radiance_field.training,\n requires_grad=proposal_requires_grad,\n )\n # render the scene\n chunk_results_dict = rendering(\n t_starts,\n t_ends,\n query_fn=query_fn,\n return_decomposition=return_decomposition,\n )\n extras = chunk_results_dict.pop(\"extras\")\n results.append(chunk_results_dict)\n render_results = collate(\n results,\n collate_fn_map={\n **default_collate_fn_map,\n Tensor: lambda x, **_: torch.cat(x, 0),\n },\n )\n extras[\"density\"] = render_results.pop(\"density\")\n for k, v in render_results.items():\n # recover the original shape\n render_results[k] = v.reshape(list(rays_shape[:-1]) + list(v.shape[1:]))\n render_results[\"extras\"] = extras\n return render_results" }, { "identifier": "render_pixels", "path": "radiance_fields/video_utils.py", "snippet": "def render_pixels(\n cfg: OmegaConf,\n model: RadianceField,\n proposal_estimator: PropNetEstimator,\n dataset: SplitWrapper,\n proposal_networks: Optional[List[DensityField]] = None,\n compute_metrics: bool = False,\n vis_indices: Optional[List[int]] = None,\n return_decomposition: bool = True,\n):\n \"\"\"\n Render pixel-related outputs from a model.\n\n Args:\n ....skip obvious args\n compute_metrics (bool, optional): Whether to compute metrics. Defaults to False.\n vis_indices (Optional[List[int]], optional): Indices to visualize. Defaults to None.\n return_decomposition (bool, optional): Whether to visualize the static-dynamic decomposition. Defaults to True.\n \"\"\"\n model.eval()\n if proposal_networks is not None:\n for p in proposal_networks:\n p.eval()\n if proposal_estimator is not None:\n proposal_estimator.eval()\n # set up render function\n render_func = lambda data_dict: render_rays(\n radiance_field=model,\n proposal_estimator=proposal_estimator,\n proposal_networks=proposal_networks,\n data_dict=data_dict,\n cfg=cfg,\n return_decomposition=return_decomposition, # return static-dynamic decomposition\n )\n render_results = render(\n dataset,\n render_func,\n model=model,\n compute_metrics=compute_metrics,\n vis_indices=vis_indices,\n )\n if compute_metrics:\n num_samples = len(dataset) if vis_indices is None else len(vis_indices)\n logger.info(f\"Eval over {num_samples} images:\")\n logger.info(f\"\\tPSNR: {render_results['psnr']:.4f}\")\n logger.info(f\"\\tSSIM: {render_results['ssim']:.4f}\")\n logger.info(f\"\\tFeature PSNR: {render_results['feat_psnr']:.4f}\")\n logger.info(f\"\\tMasked PSNR: {render_results['masked_psnr']:.4f}\")\n logger.info(f\"\\tMasked SSIM: {render_results['masked_ssim']:.4f}\")\n logger.info(f\"\\tMasked Feature PSNR: {render_results['masked_feat_psnr']:.4f}\")\n\n return render_results" }, { "identifier": "save_videos", "path": "radiance_fields/video_utils.py", "snippet": "def save_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n save_seperate_video: bool = False,\n save_images: bool = False,\n fps: int = 10,\n verbose: bool = True,\n):\n if save_seperate_video:\n return_frame = save_seperate_videos(\n render_results,\n save_pth,\n num_timestamps=num_timestamps,\n keys=keys,\n num_cams=num_cams,\n save_images=save_images,\n fps=fps,\n verbose=verbose,\n )\n 
else:\n        return_frame = save_concatenated_videos(\n            render_results,\n            save_pth,\n            num_timestamps=num_timestamps,\n            keys=keys,\n            num_cams=num_cams,\n            save_images=save_images,\n            fps=fps,\n            verbose=verbose,\n        )\n    return return_frame" }, { "identifier": "PropNetEstimator", "path": "third_party/nerfacc_prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n    \"\"\"Proposal network transmittance estimator.\n\n    References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n    Args:\n        optimizer: The optimizer to use for the proposal networks.\n        scheduler: The learning rate scheduler to use for the proposal networks.\n    \"\"\"\n\n    def __init__(\n        self,\n        optimizer: Optional[torch.optim.Optimizer] = None,\n        scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n        enable_anti_aliasing_loss: Optional[bool] = True,\n        anti_aliasing_pulse_width: Optional[List[float]] = [0.03, 0.003],\n    ) -> None:\n        super().__init__()\n        self.optimizer = optimizer\n        self.scheduler = scheduler\n        self.prop_cache: List = []\n        self.enable_anti_aliasing_loss = enable_anti_aliasing_loss\n        self.pulse_width = anti_aliasing_pulse_width\n        if self.enable_anti_aliasing_loss:\n            logger.info(\"Enable anti-aliasing loss, pulse width: %s\", self.pulse_width)\n\n    @torch.no_grad()\n    def sampling(\n        self,\n        prop_sigma_fns: List[Callable],\n        prop_samples: List[int],\n        num_samples: int,\n        # rendering options\n        n_rays: int,\n        near_plane: float,\n        far_plane: float,\n        sampling_type: Literal[\n            \"uniform\", \"lindisp\", \"sqrt\", \"log\", \"uniform_lindisp\"\n        ] = \"uniform_lindisp\",\n        # training options\n        stratified: bool = False,\n        requires_grad: bool = False,\n    ) -> Tuple[Tensor, Tensor]:\n        \"\"\"Sampling with CDFs from proposal networks.\n\n        Note:\n            When `requires_grad` is `True`, the gradients are allowed to flow\n            through the proposal networks, and the outputs of the proposal\n            networks are cached to update them later when calling `update_every_n_steps()`.\n\n        Args:\n            prop_sigma_fns: Proposal network evaluate functions. It should be a list\n                of functions that take in samples {t_starts (n_rays, n_samples),\n                t_ends (n_rays, n_samples)} and return the post-activation densities\n                (n_rays, n_samples).\n            prop_samples: Number of samples to draw from each proposal network. Should\n                be the same length as `prop_sigma_fns`.\n            num_samples: Number of samples to draw in the end.\n            n_rays: Number of rays.\n            near_plane: Near plane.\n            far_plane: Far plane.\n            sampling_type: Sampling type. One of \"uniform\", \"lindisp\", \"sqrt\",\n                \"log\", or \"uniform_lindisp\". Defaults to \"uniform_lindisp\".\n                (A toy invocation appears after the context list.)\n            stratified: Whether to use stratified sampling. Defaults to `False`.\n            requires_grad: Whether to allow gradients to flow through the proposal\n                networks. Defaults to `False`.\n\n        Returns:\n            A tuple of {Tensor, Tensor}:\n\n            - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n            - **t_ends**: The ends of the samples. 
Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for i, (level_fn, level_samples) in enumerate(\n zip(prop_sigma_fns, prop_samples)\n ):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)[\"density\"].squeeze(-1)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[..., :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs, i))\n\n intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)\n t_vals = _transform_stot(sampling_type, intervals.vals, near_plane, far_plane)\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[..., :1])], dim=-1)\n cdfs = cdfs.detach()\n loss = 0.0\n\n if self.enable_anti_aliasing_loss:\n w_normalize = (cdfs[..., 1:] - cdfs[..., :-1]) / (\n intervals.vals[..., 1:] - intervals.vals[..., :-1]\n )\n c1, w1 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[0])\n c2, w2 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[1])\n area1 = 0.5 * (w1[..., 1:] + w1[..., :-1]) * (c1[..., 1:] - c1[..., :-1])\n area2 = 0.5 * (w2[..., 1:] + w2[..., :-1]) * (c2[..., 1:] - c2[..., :-1])\n cdfs1 = torch.cat(\n [\n torch.zeros_like(area1[..., :1]),\n torch.cumsum(area1, dim=-1),\n ],\n dim=-1,\n )\n cdfs2 = torch.cat(\n [\n torch.zeros_like(area2[..., :1]),\n torch.cumsum(area2, dim=-1),\n ],\n dim=-1,\n )\n cs = [c1, c2]\n ws = [w1, w2]\n _cdfs = [cdfs1, cdfs2]\n while self.prop_cache:\n prop_intervals, prop_cdfs, prop_id = self.prop_cache.pop()\n wp = prop_cdfs[..., 1:] - prop_cdfs[..., :-1]\n cdf_interp = sorted_interp_quad(\n prop_intervals.vals, cs[prop_id], ws[prop_id], _cdfs[prop_id]\n )\n w_s = torch.diff(cdf_interp, dim=-1)\n loss += ((w_s - wp).clamp_min(0) ** 2 / (wp + 1e-5)).mean()\n else:\n while self.prop_cache:\n prop_intervals, prop_cdfs, _ = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. 
Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" }, { "identifier": "get_proposal_requires_grad_fn", "path": "third_party/nerfacc_prop_net.py", "snippet": "def get_proposal_requires_grad_fn(\n target: float = 5.0, num_steps: int = 1000\n) -> Callable:\n schedule = lambda s: min(s / num_steps, 1.0) * target\n\n steps_since_last_grad = 0\n\n def proposal_requires_grad_fn(step: int) -> bool:\n nonlocal steps_since_last_grad\n target_steps_since_last_grad = schedule(step)\n requires_grad = steps_since_last_grad > target_steps_since_last_grad\n if requires_grad:\n steps_since_last_grad = 0\n steps_since_last_grad += 1\n return requires_grad\n\n return proposal_requires_grad_fn" }, { "identifier": "MetricLogger", "path": "utils/logging.py", "snippet": "class MetricLogger(object):\n def __init__(self, delimiter=\"\\t\", output_file=None):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n self.output_file = output_file\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\n f\"'{type(self).__name__}' object has no attribute '{attr}'\"\n )\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(f\"{name}: {str(meter)}\")\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def dump_in_output_file(self, iteration, iter_time, data_time):\n if self.output_file is None:\n return\n dict_to_dump = dict(\n iteration=iteration,\n iter_time=iter_time,\n data_time=data_time,\n )\n dict_to_dump.update({k: v.median for k, v in self.meters.items()})\n with open(self.output_file, \"a\") as f:\n f.write(json.dumps(dict_to_dump) + \"\\n\")\n pass\n\n def log_every(\n self, iterable, print_freq, header=None, n_iterations=None, start_iteration=0\n ):\n i = start_iteration\n if not header:\n header = \"\"\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt=\"{avg:.6f}\")\n data_time = SmoothedValue(fmt=\"{avg:.6f}\")\n\n if n_iterations is None:\n n_iterations = len(iterable)\n\n space_fmt = \":\" + str(len(str(n_iterations))) + \"d\"\n\n log_list = [\n header,\n \"[{0\" + space_fmt + \"}/{1}]\",\n \"eta: {eta}\",\n \"elapsed: {elapsed_time_str}\",\n \"{meters}\",\n \"time: {time}\",\n \"data: {data}\",\n ]\n if torch.cuda.is_available():\n log_list += [\"max mem: {memory:.0f}\"]\n\n log_msg = 
self.delimiter.join(log_list)\n        MB = 1024.0 * 1024.0\n        for obj in iterable:\n            data_time.update(time.time() - end)\n            yield obj\n            iter_time.update(time.time() - end)\n            if i % print_freq == 0 or i == n_iterations - 1:\n                self.dump_in_output_file(\n                    iteration=i, iter_time=iter_time.avg, data_time=data_time.avg\n                )\n                eta_seconds = iter_time.global_avg * (n_iterations - i)\n                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n                elapsed_time = time.time() - start_time\n                elapsed_time_str = str(datetime.timedelta(seconds=int(elapsed_time)))\n\n                if torch.cuda.is_available():\n                    logger.info(\n                        log_msg.format(\n                            i,\n                            n_iterations,\n                            eta=eta_string,\n                            elapsed_time_str=elapsed_time_str,\n                            meters=str(self),\n                            time=str(iter_time),\n                            data=str(data_time),\n                            memory=torch.cuda.max_memory_allocated() / MB,\n                        )\n                    )\n                else:\n                    logger.info(\n                        log_msg.format(\n                            i,\n                            n_iterations,\n                            eta=eta_string,\n                            elapsed_time_str=elapsed_time_str,\n                            meters=str(self),\n                            time=str(iter_time),\n                            data=str(data_time),\n                        )\n                    )\n            i += 1\n            end = time.time()\n            if i >= n_iterations:\n                break\n        total_time = time.time() - start_time\n        total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n        logger.info(\n            f\"{header} Total time: {total_time_str} ({total_time / n_iterations:.6f} s / it)\"\n        )" }, { "identifier": "setup_logging", "path": "utils/logging.py", "snippet": "def setup_logging(\n    output: Optional[str] = None,\n    *,\n    name: Optional[str] = None,\n    level: int = logging.DEBUG,\n    capture_warnings: bool = True,\n    time_string: Optional[str] = None,\n) -> None:\n    \"\"\"\n    Setup logging.\n\n    Args:\n        output: A file name or a directory to save log files. If None, log\n            files will not be saved. If output ends with \".txt\" or \".log\", it\n            is assumed to be a file name.\n            Otherwise, logs will be saved to `output/log.txt`.\n        name: The name of the logger to configure, by default the root logger.\n        level: The logging level to use.\n        capture_warnings: Whether warnings should be captured as logs.\n    \"\"\"\n    logging.captureWarnings(capture_warnings)\n    _configure_logger(name, level=level, output=output, time_string=time_string)" }, { "identifier": "visualize_voxels", "path": "utils/visualization_tools.py", "snippet": "def visualize_voxels(\n    cfg: OmegaConf,\n    model: RadianceField,\n    proposal_estimator: PropNetEstimator = None,\n    proposal_networks: DensityField = None,\n    dataset: SceneDataset = None,\n    device: str = \"cuda\",\n    save_html: bool = True,\n    is_dynamic: bool = False,\n):\n    model.eval()\n    if proposal_estimator is not None:\n        proposal_estimator.eval()\n    if proposal_networks is not None:\n        for p in proposal_networks:\n            p.eval()\n\n    vis_voxel_aabb = torch.tensor(model.aabb, device=device)\n    # slightly expand the aabb to make sure all points are covered\n    vis_voxel_aabb[:3] -= 1\n    vis_voxel_aabb[3:] += 1\n    aabb_min, aabb_max = torch.split(vis_voxel_aabb, 3, dim=-1)\n    aabb_length = aabb_max - aabb_min\n\n    # compute the voxel resolution for visualization\n    static_voxel_resolution = torch.ceil(\n        (aabb_max - aabb_min) / cfg.render.vis_voxel_size\n    ).long()\n    empty_static_voxels = torch.zeros(*static_voxel_resolution, device=device)\n    if is_dynamic:\n        # use a slightly lower resolution (larger voxels) for dynamic voxels\n        dynamic_voxel_resolution = torch.ceil(\n            (aabb_max - aabb_min) / cfg.render.vis_voxel_size * 0.8\n        ).long()\n        all_occupied_dynamic_points = []\n        empty_dynamic_voxels = torch.zeros(*dynamic_voxel_resolution, device=device)\n\n    # collect some patches for PCA\n    to_compute_pca_patches = []\n\n    pbar = tqdm(\n        
dataset.full_pixel_set,\n desc=\"querying depth\",\n dynamic_ncols=True,\n total=len(dataset.full_pixel_set),\n )\n for i, data_dict in enumerate(pbar):\n data_dict = dataset.full_pixel_set[i]\n for k, v in data_dict.items():\n data_dict[k] = v.to(device)\n if i < dataset.num_cams:\n # collect all patches from the first timestep\n with torch.no_grad():\n render_results = render_rays(\n radiance_field=model,\n proposal_estimator=proposal_estimator,\n proposal_networks=proposal_networks,\n data_dict=data_dict,\n cfg=cfg,\n proposal_requires_grad=False,\n )\n if \"dino_pe_free\" in render_results:\n dino_feats = render_results[\"dino_pe_free\"]\n else:\n dino_feats = render_results[\"dino_feat\"]\n dino_feats = dino_feats.reshape(-1, dino_feats.shape[-1])\n to_compute_pca_patches.append(dino_feats)\n # query the depth. we force a lidar mode here so that the renderer will skip\n # querying other features such as colors, features, etc.\n data_dict[\"lidar_origins\"] = data_dict[\"origins\"].to(device)\n data_dict[\"lidar_viewdirs\"] = data_dict[\"viewdirs\"].to(device)\n data_dict[\"lidar_normed_timestamps\"] = data_dict[\"normed_timestamps\"].to(device)\n with torch.no_grad():\n render_results = render_rays(\n radiance_field=model,\n proposal_estimator=proposal_estimator,\n proposal_networks=proposal_networks,\n data_dict=data_dict,\n cfg=cfg,\n proposal_requires_grad=False,\n prefix=\"lidar_\", # force lidar mode\n return_decomposition=True,\n )\n # ==== get the static voxels ======\n if is_dynamic:\n static_depth = render_results[\"static_depth\"]\n else:\n static_depth = render_results[\"depth\"]\n world_coords = (\n data_dict[\"lidar_origins\"] + data_dict[\"lidar_viewdirs\"] * static_depth\n )\n world_coords = world_coords[static_depth.squeeze() < 80]\n voxel_coords = world_coords_to_voxel_coords(\n world_coords, aabb_min, aabb_max, static_voxel_resolution\n )\n voxel_coords = voxel_coords.long()\n selector = (\n (voxel_coords[..., 0] >= 0)\n & (voxel_coords[..., 0] < static_voxel_resolution[0])\n & (voxel_coords[..., 1] >= 0)\n & (voxel_coords[..., 1] < static_voxel_resolution[1])\n & (voxel_coords[..., 2] >= 0)\n & (voxel_coords[..., 2] < static_voxel_resolution[2])\n )\n # split the voxel_coords into separate dimensions\n voxel_coords_x = voxel_coords[..., 0][selector]\n voxel_coords_y = voxel_coords[..., 1][selector]\n voxel_coords_z = voxel_coords[..., 2][selector]\n # index into empty_voxels using the separated coordinates\n empty_static_voxels[voxel_coords_x, voxel_coords_y, voxel_coords_z] = 1\n\n # ==== get the dynamic voxels ======\n if is_dynamic:\n dynamic_depth = render_results[\"dynamic_depth\"]\n world_coords = (\n data_dict[\"lidar_origins\"] + data_dict[\"lidar_viewdirs\"] * dynamic_depth\n )\n voxel_coords = world_coords_to_voxel_coords(\n world_coords, aabb_min, aabb_max, dynamic_voxel_resolution\n )\n voxel_coords = voxel_coords.long()\n selector = (\n (voxel_coords[..., 0] >= 0)\n & (voxel_coords[..., 0] < dynamic_voxel_resolution[0])\n & (voxel_coords[..., 1] >= 0)\n & (voxel_coords[..., 1] < dynamic_voxel_resolution[1])\n & (voxel_coords[..., 2] >= 0)\n & (voxel_coords[..., 2] < dynamic_voxel_resolution[2])\n )\n # split the voxel_coords into separate dimensions\n voxel_coords_x = voxel_coords[..., 0][selector]\n voxel_coords_y = voxel_coords[..., 1][selector]\n voxel_coords_z = voxel_coords[..., 2][selector]\n # index into empty_voxels using the separated coordinates\n empty_dynamic_voxels[voxel_coords_x, voxel_coords_y, voxel_coords_z] = 1\n if i % 
dataset.num_cams == 0 and i > 0:\n all_occupied_dynamic_points.append(\n voxel_coords_to_world_coords(\n aabb_min,\n aabb_max,\n dynamic_voxel_resolution,\n torch.nonzero(empty_dynamic_voxels),\n )\n )\n empty_dynamic_voxels = torch.zeros(\n *dynamic_voxel_resolution, device=device\n )\n # compute the pca reduction\n dummy_pca_reduction, color_min, color_max = get_robust_pca(\n torch.cat(to_compute_pca_patches, dim=0).to(device), m=2.5\n )\n # now let's query the features\n all_occupied_static_points = voxel_coords_to_world_coords(\n aabb_min, aabb_max, static_voxel_resolution, torch.nonzero(empty_static_voxels)\n )\n chunk = 2**18\n pca_colors = []\n occupied_points = []\n pbar = tqdm(\n range(0, all_occupied_static_points.shape[0], chunk),\n desc=\"querying static features\",\n dynamic_ncols=True,\n )\n for i in pbar:\n occupied_points_chunk = all_occupied_static_points[i : i + chunk]\n density_list = []\n # we need to accumulate the density from all proposal networks as well\n # to ensure reliable density estimation\n for p in proposal_networks:\n density_list.append(p(occupied_points_chunk)[\"density\"].squeeze(-1))\n with torch.no_grad():\n results = model.forward(\n occupied_points_chunk,\n query_feature_head=False,\n )\n density_list.append(results[\"density\"])\n density = torch.stack(density_list, dim=0)\n density = torch.mean(density, dim=0)\n # use a preset threshold to determine whether a voxel is occupied\n selector = density > 0.5\n occupied_points_chunk = occupied_points_chunk[selector]\n if len(occupied_points_chunk) == 0:\n # skip if no occupied points in this chunk\n continue\n with torch.no_grad():\n feats = model.forward(\n occupied_points_chunk,\n query_feature_head=True,\n query_pe_head=False,\n )[\"dino_feat\"]\n colors = feats @ dummy_pca_reduction\n del feats\n colors = (colors - color_min) / (color_max - color_min)\n pca_colors.append(torch.clamp(colors, 0, 1))\n occupied_points.append(occupied_points_chunk)\n\n pca_colors = torch.cat(pca_colors, dim=0)\n occupied_points = torch.cat(occupied_points, dim=0)\n if is_dynamic:\n dynamic_pca_colors = []\n dynamic_occupied_points = []\n unq_timestamps = dataset.pixel_source.unique_normalized_timestamps.to(device)\n # query every 10 frames\n pbar = tqdm(\n range(0, len(all_occupied_dynamic_points), 10),\n desc=\"querying dynamic fields\",\n dynamic_ncols=True,\n )\n for i in pbar:\n occupied_points_chunk = all_occupied_dynamic_points[i]\n normed_timestamps = unq_timestamps[i].repeat(\n occupied_points_chunk.shape[0], 1\n )\n with torch.no_grad():\n results = model.forward(\n occupied_points_chunk,\n data_dict={\"normed_timestamps\": normed_timestamps},\n query_feature_head=False,\n )\n selector = results[\"dynamic_density\"].squeeze() > 0.1\n occupied_points_chunk = occupied_points_chunk[selector]\n if len(occupied_points_chunk) == 0:\n continue\n # query some features\n normed_timestamps = unq_timestamps[i].repeat(\n occupied_points_chunk.shape[0], 1\n )\n with torch.no_grad():\n feats = model.forward(\n occupied_points_chunk,\n data_dict={\"normed_timestamps\": normed_timestamps},\n query_feature_head=True,\n query_pe_head=False,\n )[\"dynamic_dino_feat\"]\n colors = feats @ dummy_pca_reduction\n del feats\n colors = (colors - color_min) / (color_max - color_min)\n dynamic_pca_colors.append(torch.clamp(colors, 0, 1))\n dynamic_occupied_points.append(occupied_points_chunk)\n dynamic_coords = [x.cpu().numpy() for x in dynamic_occupied_points]\n dynamic_colors = [x.cpu().numpy() for x in dynamic_pca_colors]\n else:\n 
dynamic_coords = None\n        dynamic_colors = None\n\n    figure = vis_occ_plotly(\n        vis_aabb=vis_voxel_aabb.cpu().numpy().tolist(),\n        coords=occupied_points.cpu().numpy(),\n        colors=pca_colors.cpu().numpy(),\n        dynamic_coords=dynamic_coords,\n        dynamic_colors=dynamic_colors,\n        x_ratio=1,\n        y_ratio=(aabb_length[1] / aabb_length[0]).item(),\n        z_ratio=(aabb_length[2] / aabb_length[0]).item(),\n        size=3,\n        black_bg=True,\n        title=f\"Lifted {cfg.data.pixel_source.feature_model_type} Features, PE_removed: {cfg.nerf.model.head.enable_learnable_pe}\",\n    )\n    # for plotly\n    data = figure.to_dict()[\"data\"]\n    layout = figure.to_dict()[\"layout\"]\n    output_path = os.path.join(cfg.log_dir, \"feature_field.json\")\n    with open(output_path, \"w\") as f:\n        json.dump({\"data\": data, \"layout\": layout}, f, cls=NumpyEncoder)\n    logger.info(f\"Saved to {output_path}\")\n    output_path = os.path.join(cfg.log_dir, \"feature_field.html\")\n    if save_html:\n        figure.write_html(output_path)\n        logger.info(f\"Query result saved to {output_path}\")" }, { "identifier": "visualize_scene_flow", "path": "utils/visualization_tools.py", "snippet": "def visualize_scene_flow(\n    cfg: OmegaConf,\n    model: RadianceField,\n    dataset: SceneDataset = None,\n    device: str = \"cuda\",\n    save_html: bool = True,\n):\n    pbar = tqdm(\n        range(0, len(dataset.full_lidar_set) - 1, 10),\n        desc=\"querying flow\",\n        dynamic_ncols=True,\n    )\n    predicted_flow_colors, gt_flow_colors = [], []\n    dynamic_coords = []\n    for i in pbar:\n        data_dict = dataset.full_lidar_set[i].copy()\n        lidar_flow_class = data_dict[\"lidar_flow_class\"]\n        for k, v in data_dict.items():\n            # remove invalid flow (the information is from GT)\n            data_dict[k] = v[lidar_flow_class != -1]\n        # skip this frame entirely if no valid flow points remain after filtering\n        if data_dict[\"lidar_flow_class\"].shape[0] == 0:\n            logger.info(\"no valid points, skipping...\")\n            continue\n        # filter out ground points\n        # for k, v in data_dict.items():\n        #     data_dict[k] = v[~data_dict[\"lidar_ground\"]]\n        valid_lidar_mask = dataset.get_valid_lidar_mask(i, data_dict)\n        for k, v in data_dict.items():\n            data_dict[k] = v[valid_lidar_mask]\n        lidar_points = (\n            data_dict[\"lidar_origins\"]\n            + data_dict[\"lidar_ranges\"] * data_dict[\"lidar_viewdirs\"]\n        )\n        normalized_timestamps = data_dict[\"lidar_normed_timestamps\"]\n        with torch.no_grad():\n            pred_results = model.query_flow(\n                positions=lidar_points,\n                normed_timestamps=normalized_timestamps,\n            )\n        pred_flow = pred_results[\"forward_flow\"]\n        # flow is only valid when the point is not static\n        pred_flow[pred_results[\"dynamic_density\"] < 0.2] *= 0\n\n        predicted_flow_colors.append(\n            scene_flow_to_rgb(pred_flow, flow_max_radius=2.0, background=\"bright\")\n            .cpu()\n            .numpy()\n        )\n        gt_flow_colors.append(\n            scene_flow_to_rgb(\n                data_dict[\"lidar_flow\"], flow_max_radius=2.0, background=\"bright\"\n            )\n            .cpu()\n            .numpy()\n        )\n        dynamic_coords.append(lidar_points.cpu().numpy())\n\n    vis_voxel_aabb = torch.tensor(model.aabb, device=device)\n    # slightly expand the aabb to make sure all points are covered\n    vis_voxel_aabb[:3] -= 1\n    vis_voxel_aabb[3:] += 1\n    aabb_min, aabb_max = torch.split(vis_voxel_aabb, 3, dim=-1)\n    aabb_length = aabb_max - aabb_min\n    pred_figure = vis_occ_plotly(\n        vis_aabb=vis_voxel_aabb.cpu().numpy().tolist(),\n        
dynamic_coords=dynamic_coords,\n dynamic_colors=gt_flow_colors,\n x_ratio=1,\n y_ratio=(aabb_length[1] / aabb_length[0]).item(),\n z_ratio=(aabb_length[2] / aabb_length[0]).item(),\n size=2,\n black_bg=True,\n title=f\"GT Flow\",\n )\n if save_html:\n output_path = os.path.join(cfg.log_dir, f\"predicted_flow.html\")\n pred_figure.write_html(output_path)\n logger.info(f\"Predicted flow result saved to {output_path}\")\n output_path = os.path.join(cfg.log_dir, f\"gt_flow.html\")\n gt_figure.write_html(output_path)\n logger.info(f\"GT flow saved to {output_path}\")" } ]
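The `contract_points` method in the RadianceField snippet above delegates unbounded scenes to a `contract` helper that is not shown in this row. Below is a minimal, self-contained sketch of what an infinity-norm scene contraction of this kind typically computes (mip-NeRF 360 style); treating this as the exact behavior of the project's `contract` is an assumption.

import torch

def contract_inf_norm(positions: torch.Tensor, aabb: torch.Tensor) -> torch.Tensor:
    # Map points inside the aabb linearly into [0.25, 0.75]; squash points
    # outside so that infinity lands exactly on the [0, 1] boundary.
    aabb_min, aabb_max = torch.split(aabb, 3, dim=-1)
    x = (positions - aabb_min) / (aabb_max - aabb_min) * 2.0 - 1.0  # to [-1, 1]
    mag = x.abs().amax(dim=-1, keepdim=True).clamp_min(1e-6)  # infinity norm
    x = torch.where(mag > 1.0, (2.0 - 1.0 / mag) * (x / mag), x)  # to [-2, 2]
    return (x + 2.0) / 4.0  # to [0, 1]

For example, with aabb = torch.tensor([-1., -1., -1., 1., 1., 1.]), a far-away point (100, 0, 0) maps to roughly (0.9975, 0.5, 0.5), i.e. just inside the boundary of the unit cube.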
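The learnable-PE branch of `RadianceField.forward` above reduces to one bilinear `grid_sample` over a low-resolution map. Here is a minimal sketch with shapes mirroring the model's (1, C, 80, 120) map; the channel count (32) and the batch of 4096 pixels are illustrative assumptions.

import torch
import torch.nn.functional as F

pe_map = torch.randn(1, 32, 80, 120)  # (N, C, H, W) low-res learnable PE map
pixel_coords = torch.rand(4096, 2)    # pixel coords, assumed to lie in [0, 1]
grid = pixel_coords.reshape(1, 1, -1, 2) * 2 - 1  # grid_sample expects [-1, 1]
feats = F.grid_sample(pe_map, grid, mode="bilinear", align_corners=False)
pe_feats = feats.squeeze(2).squeeze(0).permute(1, 0)  # -> (4096, 32)
assert pe_feats.shape == (4096, 32)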
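`temporal_aggregation` above combines the current dynamic features with the forward- and backward-warped ones as (d + 0.5 * f + 0.5 * b) / 2, i.e. effective weights of 0.5 for the current features and 0.25 for each warped copy. A tiny sanity check of that identity:

import torch

d = torch.full((4,), 1.0)  # stand-in for dynamic_feats
f = torch.full((4,), 2.0)  # stand-in for forward_dynamic_feats
b = torch.full((4,), 4.0)  # stand-in for backward_dynamic_feats
agg = (d + 0.5 * f + 0.5 * b) / 2.0
assert torch.allclose(agg, 0.5 * d + 0.25 * f + 0.25 * b)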
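And here is the toy invocation of `PropNetEstimator.sampling` referenced in its docstring, using a single constant-density proposal function. The dict-with-"density" return convention follows the snippet's `level_fn(...)["density"].squeeze(-1)` call; running on the estimator's default (CPU) device is an assumption about `AbstractEstimator`.

import torch

estimator = PropNetEstimator()  # no optimizer/scheduler is needed just to sample

def toy_sigma_fn(t_starts: torch.Tensor, t_ends: torch.Tensor) -> dict:
    # constant density along every ray, shaped so .squeeze(-1) matches t_starts
    return {"density": torch.full_like(t_starts, 0.05).unsqueeze(-1)}

t_starts, t_ends = estimator.sampling(
    prop_sigma_fns=[toy_sigma_fn],
    prop_samples=[64],
    num_samples=32,
    n_rays=128,
    near_plane=0.1,
    far_plane=100.0,
    sampling_type="uniform_lindisp",
    stratified=False,
    requires_grad=False,
)
assert t_starts.shape == (128, 32) and t_ends.shape == (128, 32)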
import argparse import json import logging import os import time import imageio import numpy as np import torch import torch.utils.data import builders import loss import utils.misc as misc import wandb from typing import List, Optional from omegaconf import OmegaConf from tqdm import tqdm from datasets import metrics from datasets.base import SceneDataset from radiance_fields import DensityField, RadianceField from radiance_fields.render_utils import render_rays from radiance_fields.video_utils import render_pixels, save_videos from third_party.nerfacc_prop_net import PropNetEstimator, get_proposal_requires_grad_fn from utils.logging import MetricLogger, setup_logging from utils.visualization_tools import visualize_voxels, visualize_scene_flow from datasets.waymo import WaymoDataset from datasets.nuscenes import NuScenesDataset
19,797
) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None, proposal_networks: Optional[List[DensityField]] = None,
logger = logging.getLogger() current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) # a global list of keys to render, # comment out the keys you don't want to render or uncomment the keys you want to render render_keys = [ "gt_rgbs", "rgbs", "depths", # "median_depths", "gt_dino_feats", "dino_feats", "dynamic_rgbs", "dynamic_depths", "static_rgbs", "static_depths", "forward_flows", "backward_flows", "dynamic_rgb_on_static_dinos", "dino_pe", "dino_feats_pe_free", # "dynamic_dino_on_static_rgbs", # "shadow_reduced_static_rgbs", # "shadow_only_static_rgbs", # "shadows", # "gt_sky_masks", # "sky_masks", ] def get_args_parser(): parser = argparse.ArgumentParser("Train EmernNerf for a single scene") parser.add_argument("--config_file", help="path to config file", type=str) parser.add_argument( "--eval_only", action="store_true", help="perform evaluation only" ) parser.add_argument( "--visualize_voxel", action="store_true", help="perform evaluation only" ) parser.add_argument( "--render_data_video", action="store_true", help="Render a data video", ) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") 
saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None, proposal_networks: Optional[List[DensityField]] = None,
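`setup()` above layers three config sources with `OmegaConf.merge`, where later arguments override earlier ones: the packaged defaults, the `--config_file`, and finally the trailing command-line `opts`. A minimal sketch of that precedence; the keys here are illustrative, not the project's real schema.

from omegaconf import OmegaConf

default_cfg = OmegaConf.create({"optim": {"seed": 0, "lr": 1e-3}})
file_cfg = OmegaConf.create({"optim": {"lr": 5e-4}})   # stands in for --config_file
cli_cfg = OmegaConf.from_dotlist(["optim.seed=42"])    # stands in for trailing opts
cfg = OmegaConf.merge(default_cfg, file_cfg, cli_cfg)
assert cfg.optim.lr == 5e-4 and cfg.optim.seed == 42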
proposal_estimator: PropNetEstimator = None,
7
2023-10-11 20:56:27+00:00
24k